Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/ahci_platform.c | 2
-rw-r--r--  drivers/ata/libata-acpi.c | 11
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 8
-rw-r--r--  drivers/ata/sata_highbank.c | 4
-rw-r--r--  drivers/ata/sata_svw.c | 35
-rw-r--r--  drivers/base/platform.c | 7
-rw-r--r--  drivers/base/power/qos.c | 2
-rw-r--r--  drivers/block/aoe/aoecmd.c | 2
-rw-r--r--  drivers/block/floppy.c | 5
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 18
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h | 6
-rw-r--r--  drivers/bluetooth/ath3k.c | 1
-rw-r--r--  drivers/bluetooth/btusb.c | 1
-rw-r--r--  drivers/bus/omap-ocp2scp.c | 68
-rw-r--r--  drivers/clk/ux500/u8500_clk.c | 50
-rw-r--r--  drivers/edac/amd64_edac.h | 2
-rw-r--r--  drivers/edac/edac_stub.c | 2
-rw-r--r--  drivers/edac/mce_amd_inj.c | 4
-rw-r--r--  drivers/firewire/sbp2.c | 2
-rw-r--r--  drivers/gpio/Kconfig | 4
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 6
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 23
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 31
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 66
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 21
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 21
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 44
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 26
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 3
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 2
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 30
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 115
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 117
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 43
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 42
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 73
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2001
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.h | 37
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 204
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 495
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 435
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1870
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.h | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 59
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 85
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 2060
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 266
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 855
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.h | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 324
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 376
-rw-r--r--  drivers/gpu/drm/exynos/regs-fimc.h | 669
-rw-r--r--  drivers/gpu/drm/exynos/regs-gsc.h | 284
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/regs-rotator.h | 73
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 24
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 39
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/engctx.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/falcon.c | 247
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/gpuobj.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | 167
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nve0.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 1173
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 142
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 111
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 884
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c | 190
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 104
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 122
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | 184
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/regs.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv10.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nv84.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 225
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/engctx.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/falcon.h | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/gpuobj.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/mm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/object.h | 55
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/parent.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/bsp.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/copy.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/crypt.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/ppp.h | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/vp.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/disp.c | 178
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dp.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/base.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv10.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv20.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv30.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv40.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv50.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/base.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c | 114
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c | 79
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c | 84
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 393
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 235
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 141
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 100
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hdmi.c | 261
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 764
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 321
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2547
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 403
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 120
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 530
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2141
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 222
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 742
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 135
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 472
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 122
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 356
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 120
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 271
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 23
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 909
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 274
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 37
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 146
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 902
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2009
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 84
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 893
-rw-r--r--  drivers/hid/hid-microsoft.c | 6
-rw-r--r--  drivers/hid/hidraw.c | 69
-rw-r--r--  drivers/hwmon/asb100.c | 2
-rw-r--r--  drivers/hwmon/w83627ehf.c | 1
-rw-r--r--  drivers/hwmon/w83627hf.c | 2
-rw-r--r--  drivers/hwmon/w83781d.c | 2
-rw-r--r--  drivers/hwmon/w83791d.c | 2
-rw-r--r--  drivers/hwmon/w83792d.c | 2
-rw-r--r--  drivers/hwmon/w83l786ng.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-at91.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 36
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 1
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pinctrl.c | 2
-rw-r--r--  drivers/input/input-mt.c | 4
-rw-r--r--  drivers/input/mousedev.c | 4
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 6
-rw-r--r--  drivers/iommu/intel-iommu.c | 4
-rw-r--r--  drivers/iommu/tegra-smmu.c | 1
-rw-r--r--  drivers/irqchip/irq-bcm2835.c | 3
-rw-r--r--  drivers/isdn/Kconfig | 2
-rw-r--r--  drivers/isdn/i4l/Kconfig | 2
-rw-r--r--  drivers/isdn/i4l/isdn_common.c | 4
-rw-r--r--  drivers/leds/ledtrig-cpu.c | 21
-rw-r--r--  drivers/md/dm.c | 8
-rw-r--r--  drivers/md/md.c | 27
-rw-r--r--  drivers/md/raid10.c | 131
-rw-r--r--  drivers/md/raid5.c | 79
-rw-r--r--  drivers/mmc/host/dw_mmc-exynos.c | 8
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c | 6
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.h | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 62
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 19
-rw-r--r--  drivers/mmc/host/sdhci-dove.c | 38
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 11
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 30
-rw-r--r--  drivers/mmc/host/sdhci.c | 44
-rw-r--r--  drivers/mmc/host/sdhci.h | 1
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 2
-rw-r--r--  drivers/mtd/devices/slram.c | 2
-rw-r--r--  drivers/mtd/nand/nand_base.c | 10
-rw-r--r--  drivers/mtd/ofpart.c | 2
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 7
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 5
-rw-r--r--  drivers/net/ethernet/jme.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 20
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 22
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 16
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 8
-rw-r--r--  drivers/net/irda/sir_dev.c | 2
-rw-r--r--  drivers/net/phy/mdio-bitbang.c | 1
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 11
-rw-r--r--  drivers/net/team/team_mode_broadcast.c | 6
-rw-r--r--  drivers/net/usb/cdc_eem.c | 3
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 22
-rw-r--r--  drivers/net/usb/smsc95xx.c | 5
-rw-r--r--  drivers/net/usb/usbnet.c | 8
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 2
-rw-r--r--  drivers/net/wireless/b43legacy/pio.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 11
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 11
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r--  drivers/net/xen-netfront.c | 98
-rw-r--r--  drivers/nfc/pn533.c | 25
-rw-r--r--  drivers/pci/bus.c | 3
-rw-r--r--  drivers/pci/pci-driver.c | 12
-rw-r--r--  drivers/pci/pci-sysfs.c | 34
-rw-r--r--  drivers/pci/pci.c | 32
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 20
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 3
-rw-r--r--  drivers/pci/proc.c | 8
-rw-r--r--  drivers/pinctrl/Kconfig | 4
-rw-r--r--  drivers/pinctrl/spear/pinctrl-spear.c | 2
-rw-r--r--  drivers/pinctrl/spear/pinctrl-spear1310.c | 365
-rw-r--r--  drivers/pinctrl/spear/pinctrl-spear1340.c | 41
-rw-r--r--  drivers/pinctrl/spear/pinctrl-spear320.c | 8
-rw-r--r--  drivers/pinctrl/spear/pinctrl-spear3xx.h | 1
-rw-r--r--  drivers/rapidio/rio.c | 2
-rw-r--r--  drivers/regulator/core.c | 33
-rw-r--r--  drivers/s390/char/con3215.c | 12
-rw-r--r--  drivers/s390/cio/css.h | 3
-rw-r--r--  drivers/s390/cio/device.c | 8
-rw-r--r--  drivers/s390/cio/idset.c | 3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 24
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 13
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 13
-rw-r--r--  drivers/scsi/scsi.c | 45
-rw-r--r--  drivers/scsi/scsi_lib.c | 22
-rw-r--r--  drivers/scsi/sd.c | 202
-rw-r--r--  drivers/scsi/sd.h | 7
-rw-r--r--  drivers/staging/android/android_alarm.h | 4
-rw-r--r--  drivers/tty/hvc/hvc_console.c | 7
-rw-r--r--  drivers/tty/serial/max310x.c | 1
-rw-r--r--  drivers/usb/core/hcd.c | 16
-rw-r--r--  drivers/usb/early/ehci-dbgp.c | 15
-rw-r--r--  drivers/usb/gadget/u_ether.c | 3
-rw-r--r--  drivers/usb/host/ehci-ls1x.c | 2
-rw-r--r--  drivers/usb/host/ohci-xls.c | 2
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 30
-rw-r--r--  drivers/usb/musb/ux500.c | 2
-rw-r--r--  drivers/usb/otg/Kconfig | 4
-rw-r--r--  drivers/usb/serial/keyspan.c | 3
-rw-r--r--  drivers/usb/serial/option.c | 9
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 10
-rw-r--r--  drivers/usb/storage/scsiglue.c | 6
-rw-r--r--  drivers/video/omap2/dss/dsi.c | 13
-rw-r--r--  drivers/video/omap2/dss/dss.c | 18
-rw-r--r--  drivers/video/omap2/dss/hdmi.c | 4
-rw-r--r--  drivers/video/omap2/omapfb/omapfb-ioctl.c | 2
-rw-r--r--  drivers/virtio/virtio.c | 4
-rw-r--r--  drivers/xen/Makefile | 1
-rw-r--r--  drivers/xen/events.c | 2
-rw-r--r--  drivers/xen/fallback.c | 80
-rw-r--r--  drivers/xen/privcmd.c | 18
460 files changed, 29284 insertions, 11954 deletions
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index b1ae48054dc5..b7078afddb74 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ahci_suspend(struct device *dev)
{
struct ahci_platform_data *pdata = dev_get_platdata(dev);
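For context: the CONFIG_PM -> CONFIG_PM_SLEEP switch matters because CONFIG_PM is also set for runtime-PM-only kernels, where system sleep callbacks would otherwise be compiled in but never referenced. A minimal sketch of the usual wiring (the "foo" driver name is hypothetical):

	#include <linux/pm.h>
	#include <linux/platform_device.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the controller */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* re-initialize the controller */
		return 0;
	}
	#endif

	/* SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM_SLEEP=y */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.pm	= &foo_pm_ops,
		},
	};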
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index fd9ecf74e631..5b0ba3f20edc 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
struct acpi_device *acpi_dev;
struct acpi_device_power_state *states;
- if (ap->flags & ATA_FLAG_ACPI_SATA)
- ata_dev = &ap->link.device[sdev->channel];
- else
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ if (!sata_pmp_attached(ap))
+ ata_dev = &ap->link.device[sdev->id];
+ else
+ ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id];
+ }
+ else {
ata_dev = &ap->link.device[sdev->id];
+ }
*handle = ata_dev_acpi_handle(ata_dev);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3cc7096cfda7..f46fbd3bd3fb 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
if (xfer_mode == t->mode)
return t;
+
+ WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+ __func__, xfer_mode);
+
return NULL;
}
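A quick sketch of the WARN_ONCE() semantics relied on above: the condition is evaluated on every call, but the stack trace is printed only the first time it fires, so a repeated lookup miss does not flood the log (the wrapper name is hypothetical):

	#include <linux/bug.h>
	#include <linux/libata.h>

	static const struct ata_timing *foo_find_timing(u8 xfer_mode)
	{
		const struct ata_timing *t = ata_timing_find_mode(xfer_mode);

		/* one backtrace total, no matter how often the miss recurs */
		WARN_ONCE(!t, "no timing entry for xfer_mode 0x%x\n", xfer_mode);
		return t;
	}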
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e3bda074fa12..a6df6a351d6e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+ sdev->no_report_opcodes = 1;
+ sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 26201ebef3ca..371fd2c698b7 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev)
return ret;
}
+ ret = clk_set_rate(acdev->clk, 166000000);
+ if (ret) {
+ dev_warn(acdev->host->dev, "clock set rate failed");
+ return ret;
+ }
+
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
@@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
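The added clk_set_rate() call follows the standard clk consumer sequence: get the clock, set the rate, then gate it on, bailing out on any failure. A hedged probe-time sketch of that pattern ("foo" is a hypothetical consumer; 166 MHz mirrors the rate used above):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int foo_clk_setup(struct device *dev)
	{
		struct clk *clk = devm_clk_get(dev, NULL);
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_rate(clk, 166000000);
		if (ret) {
			dev_warn(dev, "clock set rate failed\n");
			return ret;
		}

		return clk_prepare_enable(clk);
	}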
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 0d7c4c2cd26f..400bf1c3e982 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
-static int __init ahci_highbank_probe(struct platform_device *pdev)
+static int __devinit ahci_highbank_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
@@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 44a4256533e1..08608de87e4e 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
return 0;
}
+static int k2_sata_softreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ u8 dmactl;
+ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+ dmactl = readb(mmio + ATA_DMA_CMD);
+
+ /* Clear the start bit */
+ if (dmactl & ATA_DMA_START) {
+ dmactl &= ~ATA_DMA_START;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+ }
+
+ return ata_sff_softreset(link, class, deadline);
+}
+
+static int k2_sata_hardreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ u8 dmactl;
+ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+ dmactl = readb(mmio + ATA_DMA_CMD);
+
+ /* Clear the start bit */
+ if (dmactl & ATA_DMA_START) {
+ dmactl &= ~ATA_DMA_START;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+ }
+
+ return sata_sff_hardreset(link, class, deadline);
+}
static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
+ .softreset = k2_sata_softreset,
+ .hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
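The two reset hooks above duplicate the "stop any in-flight BMDMA transfer before resetting" step; a sketch of how that step could be factored out (helper name is hypothetical, register definitions come from <linux/ata.h> as in the file above):

	static void k2_bmdma_stop_for_reset(struct ata_link *link)
	{
		void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
		u8 dmactl = readb(mmio + ATA_DMA_CMD);

		/* Clear the start bit if a transfer is still running */
		if (dmactl & ATA_DMA_START)
			writeb(dmactl & ~ATA_DMA_START, mmio + ATA_DMA_CMD);
	}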
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8727e9c5eea4..72c776f2a1f5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -83,9 +83,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
+#ifdef CONFIG_SPARC
+ /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+ if (!dev || num >= dev->archdata.num_irqs)
+ return -ENXIO;
+ return dev->archdata.irqs[num];
+#else
struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
+#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
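Callers are unaffected by the sparc special case: both branches return the IRQ number or a negative errno. A typical probe()-side usage sketch ("foo" names are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;	/* e.g. -ENXIO when no IRQ is wired up */

		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
					"foo", pdev);
	}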
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 74a67e0019a2..fbbd4ed2edf2 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
if (ancestor)
error = dev_pm_qos_add_request(ancestor, req, value);
- if (error)
+ if (error < 0)
req->dev = NULL;
return error;
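The "< 0" test is the point of this fix: per the dev_pm_qos kerneldoc, dev_pm_qos_add_request() returns 1 when the request was added and the aggregate constraint changed, 0 when it was added without changing the aggregate, and a negative errno only on failure. A small caller sketch under that convention ("foo" is hypothetical):

	#include <linux/pm_qos.h>

	static int foo_add_qos(struct device *dev,
			       struct dev_pm_qos_request *req, s32 value)
	{
		int ret = dev_pm_qos_add_request(dev, req, value);

		/* 1 and 0 are both success; only negative values are errors */
		return ret < 0 ? ret : 0;
	}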
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 3804a0af3ef1..9fe4f1865558 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
- q->request_fn(q);
+ __blk_run_queue(q);
}
static void
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1c49d7173966..2ddd64a9ffde 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4330,6 +4330,7 @@ out_unreg_region:
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
+ destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive])
break;
@@ -4340,7 +4341,6 @@ out_put_disk:
}
put_disk(disks[drive]);
}
- destroy_workqueue(floppy_wq);
return err;
}
@@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void)
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
+ destroy_workqueue(floppy_wq);
+
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
@@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void)
cancel_delayed_work_sync(&fd_timeout);
cancel_delayed_work_sync(&fd_timer);
- destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index adc6f36564cf..9694dd99bbbc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data)
struct mtip_cmd *command;
int tag, cmdto_cnt = 0;
unsigned int bit, group;
- unsigned int num_command_slots = port->dd->slot_groups * 32;
+ unsigned int num_command_slots;
unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
if (unlikely(!port))
@@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data)
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+ num_command_slots = port->dd->slot_groups * 32;
for (tag = 0; tag < num_command_slots; tag++) {
/*
@@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
fis.device);
/* check for erase mode support during secure erase.*/
- if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
- && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+ if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+ (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
erasemode = 1;
}
@@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* return value
* None
*/
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
int nsect, int nents, int tag, void *callback,
void *data, int dir)
{
@@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
struct mtip_port *port = dd->port;
struct mtip_cmd *command = &port->commands[tag];
int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ u64 start = sector;
/* Map the scatter list for DMA access */
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
@@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
fis->opts = 1 << 7;
fis->command =
(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
- *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
- *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+ fis->lba_low = start & 0xFF;
+ fis->lba_mid = (start >> 8) & 0xFF;
+ fis->lba_hi = (start >> 16) & 0xFF;
+ fis->lba_low_ex = (start >> 24) & 0xFF;
+ fis->lba_mid_ex = (start >> 32) & 0xFF;
+ fis->lba_hi_ex = (start >> 40) & 0xFF;
fis->device = 1 << 6;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
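The old code stored the 48-bit LBA through two unaligned 32-bit writes that could clobber the neighbouring FIS fields; the byte-wise split above is alignment- and endian-safe. A generic sketch of the same split (helper name is hypothetical, field names match the struct used above):

	static inline void fis_set_lba48(struct host_to_dev_fis *fis, u64 lba)
	{
		fis->lba_low	= lba & 0xFF;
		fis->lba_mid	= (lba >> 8) & 0xFF;
		fis->lba_hi	= (lba >> 16) & 0xFF;
		fis->lba_low_ex	= (lba >> 24) & 0xFF;
		fis->lba_mid_ex	= (lba >> 32) & 0xFF;
		fis->lba_hi_ex	= (lba >> 40) & 0xFF;
	}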
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 5f4a917bd8bb..b1742640556a 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -34,7 +34,7 @@
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
/* check for erase mode support during secure erase */
-#define MTIP_SEC_ERASE_MODE 0x3
+#define MTIP_SEC_ERASE_MODE 0x2
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
@@ -155,14 +155,14 @@ enum {
MTIP_DDF_REBUILD_FAILED_BIT = 8,
};
-__packed struct smart_attr{
+struct smart_attr {
u8 attr_id;
u16 flags;
u8 cur;
u8 worst;
u32 data;
u8 res[3];
-};
+} __packed;
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fc2de5528dcc..b00000e8aef6 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index debda27df9b0..ee82f2fb65f0 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index ff63560b8467..0c48b0e05ed6 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -22,6 +22,26 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_data/omap_ocp2scp.h>
+
+/**
+ * _count_resources - count the number of resources
+ * @res: struct resource *
+ *
+ * Count and return the number of resources populated for the device that is
+ * connected to ocp2scp.
+ */
+static unsigned _count_resources(struct resource *res)
+{
+ int cnt = 0;
+
+ while (res->start != res->end) {
+ cnt++;
+ res++;
+ }
+
+ return cnt;
+}
static int ocp2scp_remove_devices(struct device *dev, void *c)
{
@@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
{
- int ret;
- struct device_node *np = pdev->dev.of_node;
+ int ret;
+ unsigned res_cnt, i;
+ struct device_node *np = pdev->dev.of_node;
+ struct platform_device *pdev_child;
+ struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_ocp2scp_dev *dev;
if (np) {
ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
if (ret) {
- dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n");
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
goto err0;
}
+ } else if (pdata) {
+ for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++,
+ dev++) {
+ res_cnt = _count_resources(dev->res);
+
+ pdev_child = platform_device_alloc(dev->drv_name,
+ PLATFORM_DEVID_AUTO);
+ if (!pdev_child) {
+ dev_err(&pdev->dev,
+ "failed to allocate mem for ocp2scp child\n");
+ goto err0;
+ }
+
+ ret = platform_device_add_resources(pdev_child,
+ dev->res, res_cnt);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
+ goto err1;
+ }
+
+ pdev_child->dev.parent = &pdev->dev;
+
+ ret = platform_device_add(pdev_child);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register ocp2scp child device\n");
+ goto err1;
+ }
+ }
+ } else {
+ dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n");
+ return -EINVAL;
}
+
pm_runtime_enable(&pdev->dev);
return 0;
+err1:
+ platform_device_put(pdev_child);
+
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
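A hedged board-file sketch of the platform data the non-DT path consumes. The field names (drv_name, res, devices, dev_cnt) are taken from the code above; the exact struct layout is assumed and the addresses are placeholders. Note _count_resources() stops at the first entry with start == end, hence the zeroed sentinel:

	#include <linux/ioport.h>
	#include <linux/platform_data/omap_ocp2scp.h>

	static struct resource foo_phy_res[] = {
		{
			.start	= 0x1000,	/* placeholder address */
			.end	= 0x10ff,	/* placeholder address */
			.flags	= IORESOURCE_MEM,
		},
		{ /* sentinel: start == end terminates the scan */ },
	};

	static struct omap_ocp2scp_dev foo_ocp2scp_devs[] = {
		{
			.drv_name	= "foo-phy",	/* hypothetical child */
			.res		= foo_phy_res,
		},
	};

	static struct omap_ocp2scp_dev *foo_ocp2scp_dev_ptr = foo_ocp2scp_devs;

	static struct omap_ocp2scp_platform_data foo_ocp2scp_pdata = {
		.dev_cnt	= ARRAY_SIZE(foo_ocp2scp_devs),
		.devices	= &foo_ocp2scp_dev_ptr,
	};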
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
index ca4a25ed844c..e2c17d187d98 100644
--- a/drivers/clk/ux500/u8500_clk.c
+++ b/drivers/clk/ux500/u8500_clk.c
@@ -40,7 +40,7 @@ void u8500_clk_init(void)
CLK_IS_ROOT|CLK_IGNORE_UNUSED,
32768);
clk_register_clkdev(clk, "clk32k", NULL);
- clk_register_clkdev(clk, NULL, "rtc-pl031");
+ clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
/* PRCMU clocks */
fw_version = prcmu_get_fw_version();
@@ -228,10 +228,17 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
BIT(2), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
+
clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
BIT(3), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp0");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
+
clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
BIT(4), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp1");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
BIT(5), 0);
@@ -239,6 +246,7 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
BIT(6), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
BIT(7), 0);
@@ -246,6 +254,7 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
BIT(8), 0);
+ clk_register_clkdev(clk, "apb_pclk", "slimbus0");
clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
BIT(9), 0);
@@ -255,11 +264,16 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
BIT(10), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
+
clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
BIT(11), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp3");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
BIT(0), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
BIT(1), 0);
@@ -279,12 +293,13 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
BIT(5), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp2");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi3");
@@ -316,10 +331,15 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
BIT(1), 0);
+ clk_register_clkdev(clk, "apb_pclk", "ssp0");
+
clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
BIT(2), 0);
+ clk_register_clkdev(clk, "apb_pclk", "ssp1");
+
clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
BIT(3), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
BIT(4), 0);
@@ -401,10 +421,17 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp0");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
+
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp1");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
@@ -412,17 +439,25 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.2");
+
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
- /* FIXME: Redefinition of BIT(3). */
+ U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "slimbus0");
+
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.4");
+
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp3");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
/* Periph2 */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.3");
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
@@ -430,6 +465,8 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp2");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
@@ -450,10 +487,15 @@ void u8500_clk_init(void)
/* Periph3 */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "ssp0");
+
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "ssp1");
+
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.0");
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);
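Consumer-side sketch of why these lookups are registered twice: a device instance such as "nmk-i2c.1" picks up its bus clock through the "apb_pclk" connection id and its functional clock through a NULL id ("foo" is a hypothetical driver):

	#include <linux/clk.h>
	#include <linux/err.h>

	static int foo_get_clocks(struct device *dev)
	{
		struct clk *pclk = devm_clk_get(dev, "apb_pclk");
		struct clk *fclk = devm_clk_get(dev, NULL);
		int ret;

		if (IS_ERR(pclk))
			return PTR_ERR(pclk);
		if (IS_ERR(fclk))
			return PTR_ERR(fclk);

		ret = clk_prepare_enable(pclk);
		if (ret)
			return ret;

		return clk_prepare_enable(fclk);
	}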
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8d4804732bac..8c4139647efc 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -33,7 +33,7 @@
* detection. The mods to Rev F required more family
* information detection.
*
- * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ * Changes/Fixes by Borislav Petkov <bp@alien8.de>:
* - misc fixes and code cleanups
*
* This module is based on the following documents
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 6c86f6e54558..351945fa2ecd 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -5,7 +5,7 @@
*
* 2007 (c) MontaVista Software, Inc.
* 2010 (c) Advanced Micro Devices Inc.
- * Borislav Petkov <borislav.petkov@amd.com>
+ * Borislav Petkov <bp@alien8.de>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 66b5151c1080..2ae78f20cc28 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -6,7 +6,7 @@
* This file may be distributed under the terms of the GNU General Public
* License version 2.
*
- * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com>
+ * Copyright (c) 2010: Borislav Petkov <bp@alien8.de>
* Advanced Micro Devices Inc.
*/
@@ -168,6 +168,6 @@ module_init(edac_init_mce_inject);
module_exit(edac_exit_mce_inject);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>");
+MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 1162d6b3bf85..bb1b392f5cda 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->use_10_for_rw = 1;
+ sdev->no_report_opcodes = 1;
+ sdev->no_write_same = 1;
if (sbp2_param_exclusive_login)
sdev->manage_start_stop = 1;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d055cee36942..47150f5ded04 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -47,7 +47,7 @@ if GPIOLIB
config OF_GPIO
def_bool y
- depends on OF && !SPARC
+ depends on OF
config DEBUG_GPIO
bool "Debug GPIO calls"
@@ -466,7 +466,7 @@ config GPIO_ADP5588_IRQ
config GPIO_ADNP
tristate "Avionic Design N-bit GPIO expander"
- depends on I2C && OF
+ depends on I2C && OF_GPIO
help
This option enables support for N GPIOs found on Avionic Design
I2C GPIO expanders. The register space will be extended by powers
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 0f425189de11..ce1c84760076 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -77,7 +77,7 @@ struct mcp23s08_driver_data {
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
{
@@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
break;
#endif /* CONFIG_SPI_MASTER */
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
case MCP_TYPE_008:
mcp->ops = &mcp23008_ops;
mcp->chip.ngpio = 8;
@@ -473,7 +473,7 @@ fail:
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static int __devinit mcp230xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
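The point of these hunks: a bare #ifdef CONFIG_I2C is false when I2C is built as a module, because only CONFIG_I2C_MODULE is defined in that case, whereas IS_ENABLED() covers both =y and =m. A minimal illustration ("foo" is hypothetical):

	#include <linux/kconfig.h>

	#if IS_ENABLED(CONFIG_I2C)
	static int foo_have_i2c(void) { return 1; }	/* built for I2C=y and I2C=m */
	#else
	static int foo_have_i2c(void) { return 0; }
	#endif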
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cf7afb9eb61a..be65c0451ad5 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
return mvchip->membase + GPIO_OUT_OFF;
}
+static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_BLINK_EN_OFF;
+}
+
static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_IO_CONF_OFF;
@@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
return (u >> pin) & 1;
}
+static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ unsigned long flags;
+ u32 u;
+
+ spin_lock_irqsave(&mvchip->lock, flags);
+ u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+ if (value)
+ u |= 1 << pin;
+ else
+ u &= ~(1 << pin);
+ writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
+ spin_unlock_irqrestore(&mvchip->lock, flags);
+}
+
static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
struct mvebu_gpio_chip *mvchip =
@@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
if (ret)
return ret;
+ mvebu_gpio_blink(chip, pin, 0);
mvebu_gpio_set(chip, pin, value);
spin_lock_irqsave(&mvchip->lock, flags);
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ac91a339b042..6f58c81cfcbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 0a54f65a8ebb..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
static int ast_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 90d770143cc2..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static int cirrus_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d6d007275947..f2d667b8bee2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -559,11 +559,11 @@ int drm_connector_init(struct drm_device *dev,
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
@@ -2928,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3185,7 +3164,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
@@ -3195,7 +3174,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (!connector->edid_blob_ptr)
return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3262,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_connector_property_set_value(connector, property, value);
+ drm_object_property_set_value(&connector->base, property, value);
return ret;
}
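Call-site migration implied by the removals above: callers now pass the embedded mode object explicitly instead of going through the connector-specific wrappers. A small sketch ("foo" and "prop" are hypothetical):

	static void foo_attach_props(struct drm_connector *connector,
				     struct drm_property *prop)
	{
		/* was: drm_connector_attach_property(connector, prop, 0); */
		drm_object_attach_property(&connector->base, prop, 0);
	}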
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1fe719fb32e8..7b2d378b2576 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,17 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
{
struct drm_connector *connector, *tmp;
@@ -82,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
- *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
*
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -348,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
* @x: horizontal offset into the surface
* @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -514,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -823,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
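
A hedged sketch (not part of this patch) of driving a full modeset through drm_crtc_helper_set_config(); crtc, fb, mode and connector are placeholders, and the caller must hold the mode config lock per the LOCKING notes above:

    struct drm_connector *connectors[] = { connector };
    struct drm_mode_set set = {
            .crtc = crtc,
            .fb = fb,
            .mode = mode,
            .x = 0,
            .y = 0,
            .connectors = connectors,
            .num_connectors = 1,
    };
    int ret = drm_crtc_helper_set_config(&set);  /* 0 on success, -ERRNO on failure */
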
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 3c4cccd0d753..89e196627160 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -193,6 +202,18 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
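
A sketch of the registration flow the new kerneldoc describes; the parent device and the aux_ch transfer hook are placeholders, and the i2c_algo_dp_aux_data layout is the 3.7-era one from <drm/drm_dp_helper.h>:

    /* placeholder driver-provided AUX channel transfer hook */
    static int example_aux_ch(struct i2c_adapter *adapter, int mode,
                              uint8_t write_byte, uint8_t *read_byte);

    static struct i2c_algo_dp_aux_data example_aux_algo = {
            .running = false,
            .address = 0,
            .aux_ch = example_aux_ch,
    };

    static int example_register_dp_i2c(struct device *parent,
                                       struct i2c_adapter *adapter)
    {
            adapter->algo_data = &example_aux_algo;
            adapter->dev.parent = parent;
            strlcpy(adapter->name, "DP AUX example", sizeof(adapter->name));

            return i2c_dp_aux_add_bus(adapter);
    }
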
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 011bd4f4635e..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1639,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_LOG_KMS("HDMI: DVI dual %d, "
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2c44af691e98..954d175bd7fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -45,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently of the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -339,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -1302,12 +1311,14 @@ out:
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
*
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1341,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb.
+ * probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
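
A minimal sketch (not from this patch) of the bring-up sequence the fbdev helper DOC block above describes; the crtc/connector counts and bpp are placeholders:

    ret = drm_fb_helper_init(dev, &fbdev->helper, num_crtc, max_connectors);
    if (ret < 0)
            return ret;

    drm_fb_helper_single_add_all_connectors(&fbdev->helper);
    drm_fb_helper_initial_config(&fbdev->helper, preferred_bpp);
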
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7ef1b673e1be..133b4132983e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -121,6 +121,8 @@ int drm_open(struct inode *inode, struct file *filp)
int minor_id = iminor(inode);
struct drm_minor *minor;
int retcode = 0;
+ int need_setup = 0;
+ struct address_space *old_mapping;
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
@@ -132,23 +134,37 @@ int drm_open(struct inode *inode, struct file *filp)
if (drm_device_is_unplugged(dev))
return -ENODEV;
+ if (!dev->open_count++)
+ need_setup = 1;
+ mutex_lock(&dev->struct_mutex);
+ old_mapping = dev->dev_mapping;
+ if (old_mapping == NULL)
+ dev->dev_mapping = &inode->i_data;
+ /* ihold ensures nobody can remove inode with our i_data */
+ ihold(container_of(dev->dev_mapping, struct inode, i_data));
+ inode->i_mapping = dev->dev_mapping;
+ filp->f_mapping = dev->dev_mapping;
+ mutex_unlock(&dev->struct_mutex);
+
retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- if (!dev->open_count++)
- retcode = drm_setup(dev);
- }
- if (!retcode) {
- mutex_lock(&dev->struct_mutex);
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = &inode->i_data;
- /* ihold ensures nobody can remove inode with our i_data */
- ihold(container_of(dev->dev_mapping, struct inode, i_data));
- inode->i_mapping = dev->dev_mapping;
- filp->f_mapping = dev->dev_mapping;
- mutex_unlock(&dev->struct_mutex);
+ if (retcode)
+ goto err_undo;
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ if (need_setup) {
+ retcode = drm_setup(dev);
+ if (retcode)
+ goto err_undo;
}
+ return 0;
+err_undo:
+ mutex_lock(&dev->struct_mutex);
+ filp->f_mapping = old_mapping;
+ inode->i_mapping = old_mapping;
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+ dev->dev_mapping = old_mapping;
+ mutex_unlock(&dev->struct_mutex);
+ dev->open_count--;
return retcode;
}
EXPORT_SYMBOL(drm_open);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 5729e390aa4e..80254547a3f8 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,7 +67,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, list, h_list, head)
+ hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
@@ -81,7 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -90,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ if (entry->key == key)
+ return list;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -102,7 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -152,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
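
With the lookup split into plain and RCU variants, drm_ht_find_item() can now run under rcu_read_lock() while writers keep serializing with their own lock. A hedged usage sketch (ht, key and writer_lock are placeholders):

    struct drm_hash_item *item;
    int ret;

    rcu_read_lock();                         /* readers: RCU-protected lookup */
    ret = drm_ht_find_item(&ht, key, &item);
    rcu_read_unlock();

    spin_lock(&writer_lock);                 /* writers: still need a real lock */
    ret = drm_ht_insert_item(&ht, &new_item);
    spin_unlock(&writer_lock);
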
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2ba9d7fac345..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1021,6 +1021,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
@@ -1031,6 +1033,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}
+ spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap, lnkcap2;
+ u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..1d1f1e5e33f0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+ Choose this option if you want to use IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
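
The new options form a small dependency chain: FIMC, Rotator and GSC all sit below DRM_EXYNOS_IPP, and GSC additionally needs ARCH_EXYNOS5 (IOMMU support further depends on EXYNOS_IOMMU and ARM_DMA_USE_IOMMU). An illustrative .config fragment enabling the whole stack:

    CONFIG_DRM_EXYNOS=y
    CONFIG_DRM_EXYNOS_IOMMU=y
    CONFIG_DRM_EXYNOS_IPP=y
    CONFIG_DRM_EXYNOS_FIMC=y
    CONFIG_DRM_EXYNOS_ROTATOR=y
    CONFIG_DRM_EXYNOS_GSC=y
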
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..bef43e0342a6 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
+ .of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
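
The #ifdef CONFIG_OF guard pairs with of_match_ptr(), which compiles the table reference away when OF is disabled; its definition in <linux/of.h> is simply:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)      (_ptr)
    #else
    #define of_match_ptr(_ptr)      NULL
    #endif
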
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..9601bad47a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
- }
+ init_dma_attrs(&buf->dma_attrs);
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region will be allocated; otherwise the allocation is made
+	 * as physically contiguous as possible.
+ */
+ if (flags & EXYNOS_BO_CONTIG)
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
+	 * mapping; otherwise a cachable mapping.
+ */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
- }
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err2;
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
}
- sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
- while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
- sgl = sg_next(sgl);
- i++;
+ nr_pages = buf->size >> PAGE_SHIFT;
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to get sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_attrs;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
-err1:
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
return ret;
}
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
-
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
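
A condensed sketch of the dma-attrs allocation pattern the rewritten lowlevel_buffer_allocate() adopts (3.7-era <linux/dma-attrs.h> API; dev and size are placeholders). With DMA_ATTR_NO_KERNEL_MAPPING on ARM with an IOMMU, the returned cookie is the pages array, which is what buf->pages stores above:

    DEFINE_DMA_ATTRS(attrs);
    dma_addr_t dma_addr;
    void *cookie;

    dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);   /* EXYNOS_BO_CONTIG */
    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);      /* EXYNOS_BO_WC */
    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);  /* no kvaddr by default */

    cookie = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, &attrs);
    if (!cookie)
            return -ENOMEM;
    /* ... */
    dma_free_attrs(dev, size, cookie, dma_addr, &attrs);
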
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..25cf16285033 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..2efa4b031d73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
goto out;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..61d5a8402eb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
#include <linux/dma-buf.h>
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
{
- struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- goto out;
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
- if (ret)
- goto err_free_sgt;
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ return 0;
+}
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
- return sgt;
+ if (!exynos_attach)
+ return;
-err_free_sgt:
- kfree(sgt);
- sgt = NULL;
-out:
- return NULL;
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
- unsigned int npages;
- int nents;
+ unsigned int i;
+ int nents, ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dir == DMA_NONE))
+ return ERR_PTR(-EINVAL);
+
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir)
+ return &exynos_attach->sgt;
+
+ /* reattaching is not allowed. */
+ if (WARN_ON(exynos_attach->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ sgt = &exynos_attach->sgt;
+
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sgt = ERR_PTR(-EIO);
goto err_unlock;
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
+ /* Nothing to do. */
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}
static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+	 * this case could be CONTIG or NONCONTIG type, but for now
+	 * assume NONCONTIG.
+	 * TODO: find a way for the exporter to notify the importer
+	 * of its own buffer type.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
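
Seen from the importer, the new attach/detach hooks make the mapping sticky: the first map in a given direction builds and caches the sg_table, repeat maps return it unchanged, and the real unmap happens at detach time. A hedged sketch (dmabuf and dev are placeholders):

    struct dma_buf_attachment *attach;
    struct sg_table *sgt;

    attach = dma_buf_attach(dmabuf, dev);            /* allocates exynos_attach */
    if (IS_ERR(attach))
            return PTR_ERR(attach);

    sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);   /* maps and caches */
    /* ... use sgt ... */
    dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);  /* no-op here */

    dma_buf_detach(dmabuf, attach);                  /* real unmap and free */
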
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..e0a8e8024b01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +51,9 @@
#define VBLANK_OFF_DELAY 50000
+/* platform device pointer for the exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+	 * create a mapping to manage the iommu table and store a pointer to
+	 * the iommu mapping structure in iommu_mapping of the private data.
+	 * this iommu_mapping can also be used to check whether iommu is
+	 * supported or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
if (ret < 0)
goto out_common_hdmi;
+
+ ret = exynos_platform_device_hdmi_register();
+ if (ret < 0)
+ goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
goto out_g2d;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
+ goto out_drm;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ ret = PTR_ERR(exynos_drm_pdev);
goto out;
+ }
return 0;
out:
+ platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
platform_driver_unregister(&vidi_driver);
+out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ platform_device_unregister(exynos_drm_pdev);
+
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
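
The reworked error path in exynos_drm_init() follows the usual rule that labels undo registrations in exact reverse order of the calls above them. Reduced to two drivers (illustrative names):

    static int __init example_init(void)
    {
            int ret;

            ret = platform_driver_register(&first_driver);
            if (ret < 0)
                    goto out;

            ret = platform_driver_register(&second_driver);
            if (ret < 0)
                    goto out_first;

            return 0;

    out_first:
            platform_driver_unregister(&first_driver);
    out:
            return ret;
    }
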
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..f5a97745bf93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
- void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
* @pixel_format: fourcc pixel format of this overlay
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
* @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- void __iomem *vaddr[MAX_FB_BUFFER];
int zpos;
bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
*/
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
+ void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head gem_list;
- unsigned int gem_nr;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
};
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address;
+ *	otherwise the default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0 then a default value is used for it.
+ * @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+/*
+ * This function registers the exynos drm hdmi platform device. It ensures only
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * This function unregisters the exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 241ad1eeec64..301485215a70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -226,8 +226,40 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
* already updated or not by exynos_drm_encoder_dpms function.
*/
exynos_encoder->updated = true;
+
+ /*
+	 * In the setcrtc case there is no other way to update the encoder's
+	 * dpms, so update it here.
+ */
+ exynos_encoder->dpms = DRM_MODE_DPMS_ON;
+}
+
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_encoder *exynos_encoder;
+ struct exynos_drm_manager_ops *ops;
+ struct drm_device *dev = fb->dev;
+ struct drm_encoder *encoder;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ exynos_encoder = to_exynos_encoder(encoder);
+ ops = exynos_encoder->manager->ops;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (ops->wait_for_vblank)
+ ops->wait_for_vblank(exynos_encoder->manager->dev);
+ }
}
+
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -499,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
-
- /*
- * wait for vblank interrupt
- * - this makes sure that hardware overlay is disabled to avoid
- * for the dma accesses to memory after gem buffer was released
- * because the setting for disabling the overlay will be updated
- * at vsync.
- */
- if (overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..88bb25a2a917 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..5426cc5a5e8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+	 * without iommu support, physically non-contiguous memory is not
+	 * supported for the framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
+	/* make sure that overlay data are updated before releasing fb. */
+ exynos_drm_encoder_complete_scanout(fb);
+
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
return &exynos_fb->fb;
}
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int i;
+ int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb)) {
- drm_gem_object_unreference_unlocked(obj);
- return fb;
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ return ERR_PTR(-ENOMEM);
}
- exynos_fb = to_exynos_fb(fb);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- exynos_drm_fb_destroy(fb);
+ kfree(exynos_fb);
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
- return fb;
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
return buffer;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67eb6ba56edf..f433eb7533a9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
+	/* map pages into kernel virtual address space. */
+ if (!buffer->kvaddr) {
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+ buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
/* buffer count to framebuffer always is 1 at booting time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
@@ -87,7 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+ fbi->fix.smem_start = (unsigned long)
+ (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -133,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto out;
+ goto err_release_framebuffer;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -143,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto out;
+ goto err_destroy_gem;
}
helper->fbdev = fbi;
@@ -155,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
DRM_ERROR("failed to allocate cmap.\n");
- goto out;
+ goto err_destroy_framebuffer;
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0) {
- fb_dealloc_cmap(&fbi->cmap);
- goto out;
- }
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
/*
* if failed, all resources allocated above would be released by
@@ -264,8 +316,13 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
+ if (exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
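
Because buffers are now allocated with DMA_ATTR_NO_KERNEL_MAPPING, fbdev must create and tear down its own kernel mapping; the vmap() in exynos_drm_fbdev_update() and the vunmap() in exynos_drm_fbdev_destroy() above form that pair. Condensed (illustrative):

    /* create: CPU-visible mapping for the fbdev console */
    buffer->kvaddr = vmap(buffer->pages, buffer->size >> PAGE_SHIFT, VM_MAP,
                          pgprot_writecombine(PAGE_KERNEL));

    /* destroy: drop the mapping before the buffer itself goes away */
    if (buffer->kvaddr)
            vunmap(buffer->kvaddr);
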
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..61ea24296b52
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+	struct fimc_context, ippdrv)
+#define fimc_read(offset) readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: whether the scaler path is bypassed.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+ char *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @ddata: fimc driver data.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: whether the device is suspended.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_driverdata *ddata;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ if (pattern)
+ cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
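+ /* pulse the overflow-clear bits: set CLROVF*, then clear them again */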
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
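+ /* the hardware frame counter is 1-based, while buffer ids are 0-based */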
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
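+ /*
+ * 180 degrees is implemented as an X+Y mirror; a user-requested
+ * flip on the same axis then cancels the matching mirror bit.
+ */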
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg1, EXYNOS_MSCTRL);
+ fimc_write(cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
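+ /* e.g. (hypothetical) hsize 1920, x 100, w 1280 -> h1 = 100, h2 = 540 */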
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+ h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
+
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+ pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
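+ /* YVU420 stores Cr before Cb, so cross the chroma base registers */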
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
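+ /*
+ * as on the source side: 180 degrees is an X+Y mirror, and user
+ * flips cancel the matching mirror bits.
+ */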
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
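+ /*
+ * Pick the largest power-of-two pre-scale ratio that does not
+ * overshoot; e.g. (hypothetically) src = 1920, dst = 400 matches
+ * src >= dst * 4, so ratio = 4 and shift = 2, leaving the final
+ * 480 -> 400 step to the main scaler.
+ */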
+ if (src >= dst * 64) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= dst * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= dst * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= dst * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= dst * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ int ret = 0;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ pre_dst_width = src_w / pre_hratio;
+ pre_dst_height = src_h / pre_vratio;
+ DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+ __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
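+ /*
+ * Main scaler ratios are fixed point with 14 fractional bits; e.g.
+ * (hypothetical) src_w = 800, dst_w = 400, hfactor = 0 gives
+ * hratio = 0x8000, i.e. a 2:1 downscale.
+ */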
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w);
+ sc->up_v = (dst_h >= src_h);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+ fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(cfg, EXYNOS_CISCPREDST);
+
+ return ret;
+}
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+ __func__, sc->hratio, sc->vratio);
+
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
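+ /* "wide" presumably selects full-range CSC rather than limited range */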
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
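+ /*
+ * The low-order ratio bits go into the CIEXTEN extension fields;
+ * the upper bits were programmed above (inferred from the register
+ * macros, not verified against the user manual).
+ */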
+ cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
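+ /* HD-sized targets get BT.709 CSC coefficients, everything else BT.601 */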
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+ u32 cfg, i, buf_num = 0;
+ u32 mask = 0x00000001;
+
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
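+ /* each bit set in CIFCNTSEQ marks one enqueued destination buffer */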
+ for (i = 0; i < FIMC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num++;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= (~mask);
+ cfg |= (enable << buf_id);
+ fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+ fimc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+ fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
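+ /* as on the source side, YVU420 swaps the chroma planes */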
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->sclk_fimc_clk);
+ clk_enable(ctx->fimc_clk);
+ clk_enable(ctx->wb_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_disable(ctx->fimc_clk);
+ clk_disable(ctx->wb_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:\n", __func__);
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(0, EXYNOS_CIIYSA(i));
+ fimc_write(0, EXYNOS_CIICBSA(i));
+ fimc_write(0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(0, EXYNOS_CIOYSA(i));
+ fimc_write(0, EXYNOS_CIOCBSA(i));
+ fimc_write(0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx, false);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ fimc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ /* If set to true, the screen contents can be saved as JPEG */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ fimc_set_camblk_fimd0_wb(ctx);
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture */
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq */
+ cfg0 = fimc_read(EXYNOS_CIGCTRL);
+ cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+ cfg0 = fimc_read(EXYNOS_CIOCTRL);
+ cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+ fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+ if (cmd == IPP_CMD_M2M) {
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ }
+
+ return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ /* Disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* Enable frame end irq */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct clk *parent_clk;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct exynos_drm_fimc_pdata *pdata;
+ struct fimc_driverdata *ddata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified.\n");
+ return -EINVAL;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ddata = (struct fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* clock control */
+ ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+ if (IS_ERR(ctx->sclk_fimc_clk)) {
+ dev_err(dev, "failed to get src fimc clock.\n");
+ ret = PTR_ERR(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+ clk_enable(ctx->sclk_fimc_clk);
+
+ ctx->fimc_clk = clk_get(dev, "fimc");
+ if (IS_ERR(ctx->fimc_clk)) {
+ dev_err(dev, "failed to get fimc clock.\n");
+ ret = PTR_ERR(ctx->fimc_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_clk = clk_get(dev, "pxl_async0");
+ if (IS_ERR(ctx->wb_clk)) {
+ dev_err(dev, "failed to get writeback a clock.\n");
+ ret = PTR_ERR(ctx->wb_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+ if (IS_ERR(ctx->wb_b_clk)) {
+ dev_err(dev, "failed to get writeback b clock.\n");
+ ret = PTR_ERR(ctx->wb_b_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ goto err_ctx;
+ }
+
+ parent_clk = clk_get(dev, ddata->parent_clk);
+
+ if (IS_ERR(parent_clk)) {
+ dev_err(dev, "failed to get parent clock.\n");
+ ret = PTR_ERR(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+ dev_err(dev, "failed to set parent.\n");
+ ret = -EINVAL;
+ clk_put(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ clk_put(parent_clk);
+ clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+ ctx->pol = pdata->pol;
+ ctx->ddata = ddata;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+ .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4410_fimc_data = {
+ .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "exynos4210-fimc",
+ .driver_data = (unsigned long)&exynos4210_fimc_data,
+ }, {
+ .name = "exynos4412-fimc",
+ .driver_data = (unsigned long)&exynos4410_fimc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = __devexit_p(fimc_remove),
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..dc970fa0d888
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 130a2b510d4a..bf0d9baca2bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -61,11 +63,11 @@ struct fimd_driver_data {
unsigned int timing_base;
};
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
};
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
};
@@ -78,10 +80,10 @@ struct fimd_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
+ bool resume;
};
struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
u32 vidcon1;
bool suspended;
struct mutex lock;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
struct exynos_drm_panel_info *panel;
};
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+ { .compatible = "samsung,exynos4-fimd",
+ .data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos5-fimd",
+ .data = &exynos5_fimd_driver_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id =
+ of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+ if (of_id)
+ return (struct fimd_driver_data *)of_id->data;
+#endif
+
return (struct fimd_driver_data *)
platform_get_device_id(pdev)->driver_data;
}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1);
+ VIDTCON2_HOZVAL(timing->xres - 1) |
+ VIDTCON2_LINEVAL_E(timing->yres - 1) |
+ VIDTCON2_HOZVAL_E(timing->xres - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
}
}
+static void fimd_wait_for_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal the VSYNC interrupt, or return after a
+ * 50ms timeout (i.e. a 20Hz refresh rate).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
};
static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
+ unsigned int last_x;
+ unsigned int last_y;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
/* buffer size */
val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+ VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
writel(val, ctx->regs + VIDOSD_A(win));
- val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
- win_data->ovl_width - 1) |
- VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
- win_data->ovl_height - 1);
+ last_x = win_data->offset_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->offset_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+ VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y,
- win_data->offset_x + win_data->ovl_width - 1,
- win_data->offset_y + win_data->ovl_height - 1);
+ win_data->offset_x, win_data->offset_y, last_x, last_y);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data = &ctx->win_data[win];
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
-static void fimd_wait_for_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- int ret;
-
- ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
- VIDCON1_VSTATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
.mode_set = fimd_win_mode_set,
.commit = fimd_win_commit,
.disable = fimd_win_disable,
- .wait_for_vblank = fimd_wait_for_vblank,
};
static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
-
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
out:
return IRQ_HANDLED;
}
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_attach_device(drm_dev, dev);
+
return 0;
}
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, dev);
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
return 0;
}
+static void fimd_window_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ fimd_win_disable(dev, i);
+ }
+ fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
static int fimd_activate(struct fimd_context *ctx, bool enable)
{
+ struct device *dev = ctx->subdrv.dev;
if (enable) {
int ret;
- struct device *dev = ctx->subdrv.dev;
ret = fimd_clock(ctx, true);
if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(dev);
+
+ fimd_window_resume(dev);
} else {
+ fimd_window_suspend(dev);
+
fimd_clock(ctx, false);
ctx->suspended = true;
}
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = clk_get(dev, "fimd");
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- ret = PTR_ERR(ctx->bus_clk);
- goto err_clk_get;
+ return PTR_ERR(ctx->bus_clk);
}
- ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(ctx->lcd_clk);
- goto err_bus_clk;
+ return PTR_ERR(ctx->lcd_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
- goto err_clk;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return -ENXIO;
}
ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return ret;
}
ctx->vidcon0 = pdata->vidcon0;
ctx->vidcon1 = pdata->vidcon1;
ctx->default_win = pdata->default_win;
ctx->panel = panel;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
-
-err_clk:
- clk_disable(ctx->lcd_clk);
- clk_put(ctx->lcd_clk);
-
-err_bus_clk:
- clk_disable(ctx->bus_clk);
- clk_put(ctx->bus_clk);
-
-err_clk_get:
- return ret;
}
static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
- clk_put(ctx->lcd_clk);
- clk_put(ctx->bus_clk);
-
return 0;
}
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
+ .of_match_table = of_match_ptr(fimd_driver_dt_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..6ffa0763c078 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+#define MAX_BUF_ADDR_NR 6
+
+/* maximum size of the userptr buffer pool; 64MB by default */
+#define MAX_POOL (64 * 1024 * 1024)
+
+enum {
+ BUF_TYPE_GEM = 1,
+ BUF_TYPE_USERPTR,
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
- u32 head;
- u32 data[G2D_CMDLIST_DATA_NUM];
- u32 last; /* last data offset */
+ u32 head;
+ unsigned long data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
};
struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
struct list_head list;
- unsigned int handle;
+ dma_addr_t dma_addr;
+ unsigned long userptr;
+ unsigned long size;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ bool in_pool;
+ bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int gem_nr;
+ unsigned int map_nr;
+ unsigned long handles[MAX_BUF_ADDR_NR];
+ unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
+ struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
@@ -143,23 +168,33 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
+ struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
+
+ unsigned long current_pool;
+ unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
- g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
- &g2d->cmdlist_pool, GFP_KERNEL);
+ init_dma_attrs(&g2d->cmdlist_dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL,
+ &g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct device *dev = g2d->dev;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+ unsigned long obj,
+ bool force)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr =
+ (struct g2d_cmdlist_userptr *)obj;
+
+ if (!obj)
+ return;
+
+ if (force)
+ goto out;
+
+ atomic_dec(&g2d_userptr->refcount);
+
+ if (atomic_read(&g2d_userptr->refcount) > 0)
+ return;
+
+ if (g2d_userptr->in_pool)
+ return;
+
+out:
+ exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+ if (!g2d_userptr->out_of_list)
+ list_del_init(&g2d_userptr->list);
+
+ sg_free_table(g2d_userptr->sgt);
+ kfree(g2d_userptr->sgt);
+ g2d_userptr->sgt = NULL;
+
+ kfree(g2d_userptr->pages);
+ g2d_userptr->pages = NULL;
+ kfree(g2d_userptr);
+ g2d_userptr = NULL;
+}
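
In short, the release rules above are: a non-forced put only drops one reference, and the mapping is torn down when the count reaches zero and the entry is not cached in the pool; the forced path, used by g2d_userptr_free_all() at file close, skips both checks and unmaps unconditionally.
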
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+ unsigned long userptr,
+ unsigned long size,
+ struct drm_file *filp,
+ unsigned long *obj)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr;
+ struct g2d_data *g2d;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ unsigned int npages, offset;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid userptr size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ g2d = dev_get_drvdata(g2d_priv->dev);
+
+ /* check if userptr already exists in userptr_list. */
+ list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ if (g2d_userptr->userptr == userptr) {
+ /*
+ * also check the size, because the same address
+ * could be used again with a different size.
+ */
+ if (g2d_userptr->size == size) {
+ atomic_inc(&g2d_userptr->refcount);
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+ }
+
+ /*
+ * g2d dma may still be accessing this g2d_userptr memory
+ * region, so remove the object from userptr_list so that it
+ * cannot be looked up again, and take it out of the userptr
+ * pool so that it is released once the dma access completes.
+ */
+ g2d_userptr->out_of_list = true;
+ g2d_userptr->in_pool = false;
+ list_del_init(&g2d_userptr->list);
+
+ break;
+ }
+ }
+
+ g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+ if (!g2d_userptr) {
+ DRM_ERROR("failed to allocate g2d_userptr.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&g2d_userptr->refcount, 1);
+
+ start = userptr & PAGE_MASK;
+ offset = userptr & ~PAGE_MASK;
+ end = PAGE_ALIGN(userptr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+ g2d_userptr->npages = npages;
+
+ pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ kfree(g2d_userptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vma = find_vma(current->mm, userptr);
+ if (!vma) {
+ DRM_ERROR("failed to get vm region.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ if (vma->vm_end < userptr + size) {
+ DRM_ERROR("vma is too small.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->vma = exynos_gem_get_vma(vma);
+ if (!g2d_userptr->vma) {
+ DRM_ERROR("failed to copy vma.\n");
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->size = size;
+
+ ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+ npages, pages, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ goto err_put_vma;
+ }
+
+ g2d_userptr->pages = pages;
+
+ sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ if (!sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_userptr;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+ size, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgt from pages.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->sgt = sgt;
+
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+ if (ret < 0) {
+ DRM_ERROR("failed to map sgt with dma region.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+ g2d_userptr->userptr = userptr;
+
+ list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+ if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+ g2d->current_pool += npages << PAGE_SHIFT;
+ g2d_userptr->in_pool = true;
+ }
+
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+
+err_free_userptr:
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+err_put_vma:
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+ kfree(pages);
+ kfree(g2d_userptr);
+ pages = NULL;
+ g2d_userptr = NULL;
+
+ return ERR_PTR(ret);
+}
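
A mapping is cached in the pool only while the per-device total stays below max_pool (64MB by default), so repeated blits from the same user buffer avoid pinning and mapping the pages again. A hedged caller-side sketch using only the functions above:

	unsigned long obj;
	dma_addr_t *addr;

	addr = g2d_userptr_get_dma_addr(drm_dev, userptr, size, filp, &obj);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* program a G2D base-address register with *addr and run the blit */

	/* non-forced put: a pooled mapping stays cached for later reuse */
	g2d_userptr_put_dma_addr(drm_dev, obj, false);
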
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+ struct g2d_data *g2d,
+ struct drm_file *filp)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+ list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ if (g2d_userptr->in_pool)
+ g2d_userptr_put_dma_addr(drm_dev,
+ (unsigned long)g2d_userptr,
+ true);
+
+ g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_device *drm_dev,
+ struct drm_file *file)
+{
struct g2d_cmdlist *cmdlist = node->cmdlist;
- dma_addr_t *addr;
int offset;
int i;
- for (i = 0; i < node->gem_nr; i++) {
- struct g2d_gem_node *gem_node;
-
- gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
- if (!gem_node) {
- dev_err(g2d_priv->dev, "failed to allocate gem node\n");
- return -ENOMEM;
- }
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle;
+ dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
- gem_node->handle = cmdlist->data[offset];
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
- file);
- if (IS_ERR(addr)) {
- node->gem_nr = i;
- kfree(gem_node);
- return PTR_ERR(addr);
+ handle = cmdlist->data[offset];
+
+ if (node->obj_type[i] == BUF_TYPE_GEM) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+ } else {
+ struct drm_exynos_g2d_userptr g2d_userptr;
+
+ if (copy_from_user(&g2d_userptr, (void __user *)handle,
+ sizeof(struct drm_exynos_g2d_userptr))) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+
+ addr = g2d_userptr_get_dma_addr(drm_dev,
+ g2d_userptr.userptr,
+ g2d_userptr.size,
+ file,
+ &handle);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
}
cmdlist->data[offset] = *addr;
- list_add_tail(&gem_node->list, &g2d_priv->gem_list);
- g2d_priv->gem_nr++;
+ node->handles[i] = handle;
}
return 0;
}
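
From user space, a userptr buffer is queued by setting G2D_BUF_USERPTR in the register-offset word and pointing the data word at a struct drm_exynos_g2d_userptr, roughly as below. This is only a sketch: the cmd field names and G2D_SRC_BASE_ADDR are assumptions based on this patch, not verified uapi.

	struct drm_exynos_g2d_userptr up = {
		.userptr = (unsigned long)user_buf,	/* e.g. malloc'd memory */
		.size    = user_buf_size,
	};

	struct drm_exynos_g2d_cmd cmd = {
		.offset = G2D_SRC_BASE_ADDR | G2D_BUF_USERPTR,	/* tag as userptr */
		.data   = (unsigned long)&up,
	};
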
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_file *filp)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_gem_node *node, *n;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ int i;
- list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
- if (!nr)
- break;
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle = node->handles[i];
- exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
- list_del_init(&node->list);
- kfree(node);
- nr--;
+ if (node->obj_type[i] == BUF_TYPE_GEM)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+ filp);
+ else
+ g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+ false);
+
+ node->handles[i] = 0;
}
+
+ node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
+ struct g2d_cmdlist_node *node;
+
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
+ /*
+ * the commands in run_cmdlist have completed, so unmap the gem
+ * objects in each command node to drop their references.
+ */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+ struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
+
+ if (for_addr) {
+ /* check userptr buffer type. */
+ reg_offset = (cmdlist->data[index] &
+ ~0x7fffffff) >> 31;
+ if (reg_offset) {
+ node->obj_type[i] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ }
+ }
+
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
+
+ if (node->obj_type[i] != BUF_TYPE_USERPTR)
+ node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
- size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
- node->gem_nr = req->cmd_gem_nr;
- if (req->cmd_gem_nr) {
- struct drm_exynos_g2d_cmd *cmd_gem;
+ node->map_nr = req->cmd_buf_nr;
+ if (req->cmd_buf_nr) {
+ struct drm_exynos_g2d_cmd *cmd_buf;
- cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+ cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
- (void __user *)cmd_gem,
- sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ (void __user *)cmd_buf,
+ sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
- cmdlist->last += req->cmd_gem_nr * 2;
+ cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
- ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+ g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
+ runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct g2d_data *g2d;
+ int ret;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ if (!is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ }
+
+ return ret;
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ if (!is_drm_iommu_supported(drm_dev))
+ return;
+
+ drm_iommu_detach_device(drm_dev, dev);
+}
+
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->gem_list);
+ INIT_LIST_HEAD(&g2d_priv->userptr_list);
return 0;
}
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ /*
+ * unmap all gem objects that have not completed.
+ *
+ * P.S. if the current process was terminated forcibly, some
+ * commands may still sit in inuse_cmdlist, so unmap them
+ * here.
+ */
+ g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
+ }
mutex_unlock(&g2d->cmdlist_mutex);
- g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+ /* release all g2d_userptr objects left in the pool. */
+ g2d_userptr_free_all(drm_dev, g2d, file);
kfree(file_priv->g2d_priv);
}
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0)
- goto err_destroy_workqueue;
-
- g2d->gate_clk = clk_get(dev, "fimg2d");
+ g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_fini_cmdlist;
+ goto err_destroy_workqueue;
}
pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ g2d->max_pool = MAX_POOL;
+
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
- clk_put(g2d->gate_clk);
-err_fini_cmdlist:
- g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..d48183e7e056 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
+ /* TODO */
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
- return -EINTR;
-
- pfn = page_to_pfn(buf->pages[page_offset++]);
- } else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
- return vm_insert_mixed(vma, f_vaddr, pfn);
-}
+ if (!buf->sgt)
+ return -EINTR;
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
-
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- /* add some codes for UNCACHED type here. TODO */
+ return vm_insert_mixed(vma, f_vaddr, pfn);
}
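
As a worked example of the walk above: with sg entries covering 16, 8 and 32 pages and page_offset = 20, the loop skips the 16-page entry (page_offset becomes 4), stops at the 8-page entry, and the fault maps that entry's pfn plus 4.
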
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
drm_gem_object_unreference_unlocked(obj);
/*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+ struct file *filp)
+{
+ struct drm_file *file_priv;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ /* find current process's drm_file from filelist. */
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+ if (file_priv->filp == filp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return file_priv;
+ }
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ WARN_ON(1);
+
+ return ERR_PTR(-EFAULT);
+}
+
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ struct drm_file *file_priv;
+ unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ /* restore it to driver's fops. */
+ filp->f_op = fops_get(drm_dev->driver->fops);
+
+ file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+ if (IS_ERR(file_priv))
+ return PTR_ERR(file_priv);
+
+ /* restore it to drm_file. */
+ filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information about physically contiguous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
- vma->vm_flags |= VM_MIXEDMAP;
+ /*
+ * take a reference to this mapping of the object; the reference
+ * is dropped by the corresponding vm_close call.
+ */
+ drm_gem_object_reference(obj);
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
+ mutex_lock(&drm_dev->struct_mutex);
+ drm_vm_open_locked(drm_dev, vma);
+ mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj->filp->f_op = &exynos_drm_gem_fops;
- obj->filp->private_data = obj;
+ /*
+ * Set the gem-specific mapper's fops; it will be restored to
+ * dev->driver->fops by exynos_drm_gem_mmap_buffer. This routes
+ * the following mmap to the gem-specific mapper temporarily.
+ */
+ file_priv->filp->f_op = &exynos_drm_gem_fops;
- addr = vm_mmap(obj->filp, 0, args->size,
+ /*
+ * Store the gem object in private_data so that the gem-specific
+ * mapper can retrieve it. It will be restored to the drm_file by
+ * exynos_drm_gem_mmap_buffer.
+ */
+ file_priv->filp->private_data = obj;
+
+ addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr))
+ if (IS_ERR((void *)addr)) {
+ file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
+ }
args->mapped = addr;
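
Taken together with exynos_drm_gem_mmap_buffer above, the round trip is: the ioctl swaps in the gem-specific f_op and stores the gem object in private_data, vm_mmap then lands in exynos_drm_gem_mmap_buffer, which sets up the dma mapping and restores both fields before returning to user space.
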
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+ /* the memory region was mapped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ if (i != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
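
Two pinning strategies are used here: VM_IO/VM_PFNMAP regions are resolved pfn by pfn with follow_pfn and take no page references, while ordinary anonymous or file-backed memory goes through get_user_pages; the references taken in the latter case are dropped again by exynos_gem_put_pages_to_userptr.
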
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return nents;
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..f11f2afd5bfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,27 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
* @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mapped
+ *	with VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
+ unsigned long userptr;
dma_addr_t dma_addr;
- struct sg_table *sgt;
+ struct dma_attrs dma_attrs;
+ unsigned int write;
struct page **pages;
- unsigned long page_size;
+ struct sg_table *sgt;
unsigned long size;
+ bool pfnmap;
};
/*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
+ * @vma: a pointer to vm_area.
 * @flags: indicates memory type and cache attribute of the allocated buffer.
 *
 * P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
+ struct vm_area_struct *vma;
unsigned int flags;
};
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/*
* put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* map user space memory allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..5639353d47b9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaling/rotation and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * GSC also provides image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 4
+#define GSC_MAX_DST 16
+#define GSC_RESET_TIMEOUT 50
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 16
+#define GSC_WIDTH_ITU_709 1280
+#define GSC_SC_UP_MAX_RATIO 65536
+#define GSC_SC_DOWN_RATIO_7_8 74898
+#define GSC_SC_DOWN_RATIO_6_8 87381
+#define GSC_SC_DOWN_RATIO_5_8 104857
+#define GSC_SC_DOWN_RATIO_4_8 131072
+#define GSC_SC_DOWN_RATIO_3_8 174762
+#define GSC_SC_DOWN_RATIO_2_8 262144
+#define GSC_REFRESH_MIN 12
+#define GSC_REFRESH_MAX 60
+#define GSC_CROP_MAX 8192
+#define GSC_CROP_MIN 32
+#define GSC_SCALE_MAX 4224
+#define GSC_SCALE_MIN 32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR 16
+#define GSC_COEF_H_8T 8
+#define GSC_COEF_V_4T 4
+#define GSC_COEF_DEPTH 3
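+
+The scaling-ratio constants read as 16.16 fixed point, ratio = 65536 * src / dst: for instance 65536 * 8 / 7 = 74898 for the ~8:7 step and 65536 * 8 / 2 = 262144 for ~8:2, matching the comments in the coefficient tables below.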
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct gsc_context, ippdrv);
+#define gsc_read(offset) readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see the user manual, section 49.2 (features).
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: ippdrv structure describing this device to the ipp framework.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: whether rotation of src is enabled.
+ * @suspended: whether the device is suspended (used by qos operations).
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ int id;
+ int irq;
+ bool rotation;
+ bool suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 },
+ { -2, 7, -21, 99, 57, -16, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 7, -2 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 3, -8, 14, 111, 13, -8, 3, 0 },
+ { 2, -6, 7, 112, 21, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 },
+ { 1, -1, -7, 103, 44, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 },
+ { 0, 3, -15, 85, 69, -17, 4, -1 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 },
+ { -1, 4, -17, 69, 85, -15, 3, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 },
+ { -1, 4, -15, 44, 103, -7, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 },
+ { -1, 4, -12, 28, 110, 1, -4, 2 },
+ { -1, 3, -10, 21, 112, 7, -6, 2 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 2, -11, 25, 96, 25, -11, 2, 0 },
+ { 2, -10, 19, 96, 31, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 },
+ { 2, -8, 10, 92, 43, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 },
+ { 0, 1, -12, 43, 92, 10, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 },
+ { 0, 2, -12, 31, 96, 19, -10, 2 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { -1, -8, 33, 80, 33, -8, -1, 0 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 },
+ { 0, -8, 20, 78, 46, -6, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 },
+ { 1, -7, 10, 71, 58, -1, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 },
+ { 1, -5, -1, 58, 71, 10, -7, 1 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 },
+ { 1, -3, -6, 46, 78, 20, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { -3, 0, 35, 64, 35, 0, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 61, 46, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 6, 46, 61, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { -1, 8, 33, 48, 33, 8, -1, 0 },
+ { -1, 7, 31, 49, 35, 9, -1, -1 },
+ { -1, 6, 30, 49, 36, 10, -1, -1 },
+ { -1, 5, 28, 48, 38, 12, -1, -1 },
+ { -1, 4, 26, 48, 39, 13, 0, -1 },
+ { -1, 3, 24, 47, 41, 15, 0, -1 },
+ { -1, 2, 23, 47, 42, 16, 0, -1 },
+ { -1, 2, 21, 45, 43, 18, 1, -1 },
+ { -1, 1, 19, 45, 45, 19, 1, -1 },
+ { -1, 1, 18, 43, 45, 21, 2, -1 },
+ { -1, 0, 16, 42, 47, 23, 2, -1 },
+ { -1, 0, 15, 41, 47, 24, 3, -1 },
+ { -1, 0, 13, 39, 48, 26, 4, -1 },
+ { -1, -1, 12, 38, 48, 28, 5, -1 },
+ { -1, -1, 10, 36, 49, 30, 6, -1 },
+ { -1, -1, 9, 35, 49, 31, 7, -1 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 2, 13, 30, 38, 30, 13, 2, 0 },
+ { 2, 12, 29, 38, 30, 14, 3, 0 },
+ { 2, 11, 28, 38, 31, 15, 3, 0 },
+ { 2, 10, 26, 38, 32, 16, 4, 0 },
+ { 1, 10, 26, 37, 33, 17, 4, 0 },
+ { 1, 9, 24, 37, 34, 18, 5, 0 },
+ { 1, 8, 24, 37, 34, 19, 5, 0 },
+ { 1, 7, 22, 36, 35, 20, 6, 1 },
+ { 1, 6, 21, 36, 36, 21, 6, 1 },
+ { 1, 6, 20, 35, 36, 22, 7, 1 },
+ { 0, 5, 19, 34, 37, 24, 8, 1 },
+ { 0, 5, 18, 34, 37, 24, 9, 1 },
+ { 0, 4, 17, 33, 37, 26, 10, 1 },
+ { 0, 4, 16, 32, 38, 26, 10, 2 },
+ { 0, 3, 15, 31, 38, 28, 11, 2 },
+ { 0, 3, 14, 30, 38, 29, 12, 2 }
+ }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 128, 0, 0 },
+ { -4, 127, 5, 0 },
+ { -6, 124, 11, -1 },
+ { -8, 118, 19, -1 },
+ { -8, 111, 27, -2 },
+ { -8, 102, 37, -3 },
+ { -8, 92, 48, -4 },
+ { -7, 81, 59, -5 },
+ { -6, 70, 70, -6 },
+ { -5, 59, 81, -7 },
+ { -4, 48, 92, -8 },
+ { -3, 37, 102, -8 },
+ { -2, 27, 111, -8 },
+ { -1, 19, 118, -8 },
+ { -1, 11, 124, -6 },
+ { 0, 5, 127, -4 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 8, 112, 8, 0 },
+ { 4, 111, 14, -1 },
+ { 1, 109, 20, -2 },
+ { -2, 105, 27, -2 },
+ { -3, 100, 34, -3 },
+ { -5, 93, 43, -3 },
+ { -5, 86, 51, -4 },
+ { -5, 77, 60, -4 },
+ { -5, 69, 69, -5 },
+ { -4, 60, 77, -5 },
+ { -4, 51, 86, -5 },
+ { -3, 43, 93, -5 },
+ { -3, 34, 100, -3 },
+ { -2, 27, 105, -2 },
+ { -2, 20, 109, 1 },
+ { -1, 14, 111, 4 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 16, 96, 16, 0 },
+ { 12, 97, 21, -2 },
+ { 8, 96, 26, -2 },
+ { 5, 93, 32, -2 },
+ { 2, 89, 39, -2 },
+ { 0, 84, 46, -2 },
+ { -1, 79, 53, -3 },
+ { -2, 73, 59, -2 },
+ { -2, 66, 66, -2 },
+ { -2, 59, 73, -2 },
+ { -3, 53, 79, -1 },
+ { -2, 46, 84, 0 },
+ { -2, 39, 89, 2 },
+ { -2, 32, 93, 5 },
+ { -2, 26, 96, 8 },
+ { -2, 21, 97, 12 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { 22, 84, 22, 0 },
+ { 18, 85, 26, -1 },
+ { 14, 84, 31, -1 },
+ { 11, 82, 36, -1 },
+ { 8, 79, 42, -1 },
+ { 6, 76, 47, -1 },
+ { 4, 72, 52, 0 },
+ { 2, 68, 58, 0 },
+ { 1, 63, 63, 1 },
+ { 0, 58, 68, 2 },
+ { 0, 52, 72, 4 },
+ { -1, 47, 76, 6 },
+ { -1, 42, 79, 8 },
+ { -1, 36, 82, 11 },
+ { -1, 31, 84, 14 },
+ { -1, 26, 85, 18 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { 26, 76, 26, 0 },
+ { 22, 76, 30, 0 },
+ { 19, 75, 34, 0 },
+ { 16, 73, 38, 1 },
+ { 13, 71, 43, 1 },
+ { 10, 69, 47, 2 },
+ { 8, 66, 51, 3 },
+ { 6, 63, 55, 4 },
+ { 5, 59, 59, 5 },
+ { 4, 55, 63, 6 },
+ { 3, 51, 66, 8 },
+ { 2, 47, 69, 10 },
+ { 1, 43, 71, 13 },
+ { 1, 38, 73, 16 },
+ { 0, 34, 75, 19 },
+ { 0, 30, 76, 22 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { 29, 70, 29, 0 },
+ { 26, 68, 32, 2 },
+ { 23, 67, 36, 2 },
+ { 20, 66, 39, 3 },
+ { 17, 65, 43, 3 },
+ { 15, 63, 46, 4 },
+ { 12, 61, 50, 5 },
+ { 10, 58, 53, 7 },
+ { 8, 56, 56, 8 },
+ { 7, 53, 58, 10 },
+ { 5, 50, 61, 12 },
+ { 4, 46, 63, 15 },
+ { 3, 43, 65, 17 },
+ { 3, 39, 66, 20 },
+ { 2, 36, 67, 23 },
+ { 2, 32, 68, 26 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 32, 64, 32, 0 },
+ { 28, 63, 34, 3 },
+ { 25, 62, 37, 4 },
+ { 22, 62, 40, 4 },
+ { 19, 61, 43, 5 },
+ { 17, 59, 46, 6 },
+ { 15, 58, 48, 7 },
+ { 13, 55, 51, 9 },
+ { 11, 53, 53, 11 },
+ { 9, 51, 55, 13 },
+ { 7, 48, 58, 15 },
+ { 6, 46, 59, 17 },
+ { 5, 43, 61, 19 },
+ { 4, 40, 62, 22 },
+ { 4, 37, 62, 25 },
+ { 3, 34, 63, 28 }
+ }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ else
+ cfg |= GSC_IRQ_OR_MASK;
+
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+
+ gsc_write(cfg, GSC_IRQ);
+}
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+ GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709) {
+ if (sc->range)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_HD_NARROW;
+ } else {
+ if (sc->range)
+ cfg |= GSC_IN_RGB_SD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_NARROW;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* update the buffer mask bit: set = dequeued, clear = enqueued */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_SRC) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+ .set_fmt = gsc_src_set_fmt,
+ .set_transf = gsc_src_set_transf,
+ .set_size = gsc_src_set_size,
+ .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+ GSC_OUT_GLOBAL_ALPHA_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_OUT_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_OUT_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+ GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = (cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270)) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
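+/*
+ * Pick the pre-scaler downscale ratio (1, 2 or 4). A total downscale of
+ * 8x or more per axis is rejected here, which suggests the pre-scaler
+ * and main scaler together cannot shrink further; e.g. src=1920,
+ * dst=300 gives pre_ratio 4 and leaves the rest to the main scaler.
+ */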
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 8) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 4)
+ *ratio = 4;
+ else if (src >= dst * 2)
+ *ratio = 2;
+ else
+ *ratio = 1;
+
+ return 0;
+}
+
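+/*
+ * The shift factor matches log2(hratio * vratio) for every entry below
+ * (4x4 -> 4, 4x2 -> 3, 2x2 -> 2, 1x1 -> 0), though that is a reading of
+ * the table rather than a documented hardware formula.
+ */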
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+ if (hratio == 4 && vratio == 4)
+ *shfactor = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *shfactor = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *shfactor = 2;
+ else if (hratio == 1 && vratio == 1)
+ *shfactor = 0;
+ else
+ *shfactor = 1;
+}
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+ u32 src_w, src_h, dst_w, dst_h;
+ int ret = 0;
+
+ src_w = src->w;
+ src_h = src->h;
+
+ if (ctx->rotation) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+ __func__, sc->pre_hratio, sc->pre_vratio);
+
+ sc->main_hratio = (src_w << 16) / dst_w;
+ sc->main_vratio = (src_h << 16) / dst_h;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+ sc->pre_shfactor);
+
+ cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+ GSC_PRESC_H_RATIO(sc->pre_hratio) |
+ GSC_PRESC_V_RATIO(sc->pre_vratio));
+ gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+ return ret;
+}
+
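+/*
+ * main_hratio/main_vratio are src/dst ratios in 16.16 fixed point (see
+ * gsc_set_prescaler). The comparisons below pick one of seven polyphase
+ * coefficient sets, from up-scaling (0) to the strongest down-scaling
+ * (6); GSC_SC_DOWN_RATIO_7_8 presumably marks scaling down to 7/8.
+ */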
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_H_8T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(h_coef_8t[sc_ratio][i][j],
+ GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_V_4T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(v_coef_4t[sc_ratio][i][j],
+ GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_set_h_coef(ctx, sc->main_hratio);
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+ gsc_set_v_coef(ctx, sc->main_vratio);
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+ GSC_DSTIMG_OFFSET_Y(pos->y));
+ gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+ /* scaled size */
+ cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_SCALED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_DSTIMG_SIZE);
+ cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+ GSC_DSTIMG_WIDTH_MASK);
+ cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+ GSC_DSTIMG_HEIGHT(sz->vsize));
+ gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709) {
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_HD_NARROW;
+ } else {
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_NARROW;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
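+/*
+ * Count the destination buffer slots still enqueued: a set bit in the
+ * Y-address mask register marks a dequeued (masked) slot, so start at
+ * GSC_REG_SZ and subtract one per set bit.
+ */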
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+ u32 cfg, i, buf_num = GSC_REG_SZ;
+ u32 mask = 0x00000001;
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ for (i = 0; i < GSC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num--;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* update the buffer mask bit: set = dequeued, clear = enqueued */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ /* enable the frame-done interrupt once enough buffers are queued */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+ gsc_handle_irq(ctx, true, false, true);
+
+ /* disable the interrupt again when the queue has drained */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+ gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_DST) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+ .set_fmt = gsc_dst_set_fmt,
+ .set_transf = gsc_dst_set_transf,
+ .set_size = gsc_dst_set_size,
+ .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->gsc_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->gsc_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
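+/*
+ * On frame done, find the source slot that just completed: scan from
+ * the hardware's current index for the first unmasked slot, then mask
+ * (dequeue) it so it is not picked up again.
+ */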
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_SRC;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_SRC; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_SRC) {
+ DRM_ERROR("failed to get in buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_DST;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_DST; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_DST) {
+ DRM_ERROR("failed to get out buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+ struct gsc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ u32 status;
+ int buf_id[EXYNOS_DRM_OPS_MAX];
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ status = gsc_read(GSC_IRQ);
+ if (status & GSC_IRQ_STATUS_OR_IRQ) {
+ dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+ return IRQ_NONE;
+ }
+
+ if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+ dev_dbg(ippdrv->dev, "frame done at %d, status 0x%x.\n",
+ ctx->id, status);
+
+ buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+ return IRQ_HANDLED;
+
+ buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+ buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
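+ /*
+ * Event delivery to userspace is deferred to the ipp event
+ * workqueue; only the buffer bookkeeping runs in irq context.
+ */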
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+ buf_id[EXYNOS_DRM_OPS_SRC];
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = GSC_REFRESH_MIN;
+ prop_list->refresh_max = GSC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = GSC_CROP_MAX;
+ prop_list->crop_max.vsize = GSC_CROP_MAX;
+ prop_list->crop_min.hsize = GSC_CROP_MIN;
+ prop_list->crop_min.vsize = GSC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = GSC_SCALE_MAX;
+ prop_list->scale_max.vsize = GSC_SCALE_MAX;
+ prop_list->scale_min.hsize = GSC_SCALE_MIN;
+ prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!gsc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct gsc_scaler *sc = &ctx->sc;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ ret = gsc_sw_reset(ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset hardware.\n");
+ return ret;
+ }
+
+ /* scaler setting */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+ sc->range = true;
+
+ return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ u32 cfg;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ gsc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* enable one shot */
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+ GSC_ENABLE_CLK_GATE_MODE_MASK);
+ cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+ gsc_write(cfg, GSC_ENABLE);
+
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_WB:
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+ /* src local path */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_OUTPUT:
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst local path */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ ret = gsc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ gsc_set_scaler(ctx, &ctx->sc);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg |= GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+
+ return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* bypass */
+ break;
+ case IPP_CMD_WB:
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ gsc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+}
+
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clock control */
+ ctx->gsc_clk = clk_get(dev, "gscl");
+ if (IS_ERR(ctx->gsc_clk)) {
+ dev_err(dev, "failed to get gsc clock.\n");
+ ret = PTR_ERR(ctx->gsc_clk);
+ goto err_ctx;
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
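+ /*
+ * With a NULL primary handler, request_threaded_irq() runs
+ * gsc_irq_handler in a kernel thread; IRQF_ONESHOT keeps the irq
+ * line masked until that thread finishes.
+ */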
+ ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+ IRQF_ONESHOT, "drm_gsc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+ ippdrv->check_property = gsc_ippdrv_check_property;
+ ippdrv->reset = gsc_ippdrv_reset;
+ ippdrv->start = gsc_ippdrv_start;
+ ippdrv->stop = gsc_ippdrv_stop;
+ ret = gsc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm gsc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->gsc_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
+
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->gsc_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __FILE__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
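+/*
+ * SET_SYSTEM_SLEEP_PM_OPS and SET_RUNTIME_PM_OPS expand to nothing when
+ * CONFIG_PM_SLEEP/CONFIG_PM_RUNTIME are disabled, matching the #ifdef
+ * guards around the callbacks above.
+ */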
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = __devexit_p(gsc_remove),
+ .driver = {
+ .name = "exynos-drm-gsc",
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..b3c3bc618c0f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..55793c46e3c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
/* Common hdmi subdrv needs to access the hdmi and mixer though context.
* These should be initialied by the repective drivers */
static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
bool enabled[MIXER_WIN_NR];
};
+int exynos_platform_device_hdmi_register(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ return -EEXIST;
+
+ exynos_drm_hdmi_pdev = platform_device_register_simple(
+ "exynos-drm-hdmi", -1, NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+ return PTR_ERR(exynos_drm_hdmi_pdev);
+
+ return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (mixer_ops && mixer_ops->wait_for_vblank)
+ mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
+ .wait_for_vblank = drm_hdmi_wait_for_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
ctx->enabled[win] = false;
}
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (mixer_ops && mixer_ops->wait_for_vblank)
- mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
- .wait_for_vblank = drm_mixer_wait_for_vblank,
};
static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
return 0;
}
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct drm_hdmi_context *ctx;
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..fcc3093ec8fe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
+ int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*wait_for_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
/* overlay */
- void (*wait_for_vblank)(void *ctx);
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..2482b7f96341
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+ if (!priv->da_space_order)
+ priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size,
+ priv->da_space_order);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(dev, 0xffffffffu);
+ dev->archdata.mapping = mapping;
+
+ return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * if mapping->kref becomes 0 then all things related to iommu mapping
+ * will be released
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attach
+ *
+ * This function should be called by sub drivers to attach it to iommu
+ * mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+ subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+ sizeof(*subdrv_dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+ * Set dma_ops to drm_device just one time.
+ *
+ * The dma mapping api needs device object and the api is used
+ * to allocate physical memory and map it with the iommu table.
+ * If iommu attach succeeded, the sub driver would have dma_ops
+ * for iommu and also all sub drivers have same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
+
+/*
+ * drm_iommu_detach_device -detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach it from iommu
+ * mapping
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..18a0ca190b98
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+#define EXYNOS_DEV_ADDR_ORDER 0x4
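+
+/*
+ * The defaults give each device 1GiB (0x40000000) of IOMMU virtual
+ * address space starting at 0x20000000; drm_create_iommu_mapping()
+ * uses them unless overridden through the drm_device private data.
+ */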
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *dev_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..49eebe948ed2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image
+ * scaler/rotator and input/output DMA operations, using FIMC, GSC,
+ * Rotator, and so on. IPP is an integration driver for hardware
+ * blocks that share these attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open behaviour for multi-open.
+ * 7. implement power_on for power and sysmmu control.
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c) ((c) == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+ struct drm_file *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex ipp_lock;
+ struct mutex prop_lock;
+ struct idr ipp_idr;
+ struct idr prop_idr;
+ struct workqueue_struct *event_workq;
+ struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_del(&ippdrv->drv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
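+/*
+ * Allocate an idr id with the two-step idr API: idr_pre_get() preloads
+ * memory, idr_get_new_above() takes an id >= 1 under the lock, and a
+ * racy -EAGAIN simply retries the preload.
+ */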
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+ u32 *idp)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("failed to get idr.\n");
+ return -ENOMEM;
+ }
+
+ /* do the allocation under our mutexlock */
+ mutex_lock(lock);
+ ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ mutex_unlock(lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ void *obj;
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+ mutex_lock(lock);
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (!obj) {
+ DRM_ERROR("failed to find object.\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_unlock(lock);
+
+ return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+ enum drm_exynos_ipp_cmd cmd)
+{
+ /*
+ * Check the dedicated flag, and for WB/OUTPUT operations whether
+ * the device is already powered on.
+ */
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return true;
+
+ return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 ipp_id = property->ipp_id;
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+ if (ipp_id) {
+ /* find ipp driver using idr */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ ipp_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+ return ippdrv;
+ }
+
+ /*
+ * WB and OUTPUT operations do not support multi-operation,
+ * so mark the driver dedicated at set-property ioctl time
+ * and clear the flag when the ipp driver finishes.
+ */
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_ERROR("already used choose device.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+ * This is necessary to find the correct device among the
+ * ipp drivers: they have different abilities, so the
+ * property has to be checked.
+ */
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_ERROR("not support property.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ippdrv;
+ } else {
+ /*
+ * Search the whole driver list: the user application did
+ * not set ipp_id, so the ipp subsystem has to find a
+ * suitable driver itself.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_DEBUG_KMS("%s:used device.\n", __func__);
+ continue;
+ }
+
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_DEBUG_KMS("%s:not support property.\n",
+ __func__);
+ continue;
+ }
+
+ return ippdrv;
+ }
+
+ DRM_ERROR("not support ipp driver operations.\n");
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+ * Search for the ipp driver by prop_id handle; the ipp subsystem
+ * sometimes needs this, e.g. for PAUSE state, queue buf or
+ * command control.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+ count++, (int)ippdrv);
+
+ if (!list_empty(&ippdrv->cmd_list)) {
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+ if (c_node->property.prop_id == prop_id)
+ return ippdrv;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_prop_list *prop_list = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!prop_list) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+ if (!prop_list->ipp_id) {
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ count++;
+ /*
+ * Report the ippdrv count to the user application: first
+ * it reads the count, then it queries each driver's
+ * capability using ipp_id.
+ */
+ prop_list->count = count;
+ } else {
+ /*
+ * Get the ippdrv capability by ipp_id. Some devices do not
+ * support the wb or output interfaces, so the user
+ * application uses this ioctl to pick the right ipp
+ * driver.
+ */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ prop_list->ipp_id);
+ if (!ippdrv) {
+ DRM_ERROR("not found ipp%d driver.\n",
+ prop_list->ipp_id);
+ return -EINVAL;
+ }
+
+ /* copy the found capability data back to the ioctl argument */
+ *prop_list = *ippdrv->prop_list;
+ }
+
+ return 0;
+}
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+ int idx)
+{
+ struct drm_exynos_ipp_config *config = &property->config[idx];
+ struct drm_exynos_pos *pos = &config->pos;
+ struct drm_exynos_sz *sz = &config->sz;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+ __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+ DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ u32 prop_id = property->prop_id;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the command node in the ippdrv command list using prop_id,
+ * and when found, store the new property information in that
+ * command node.
+ */
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if ((c_node->property.prop_id == prop_id) &&
+ (c_node->state == IPP_STATE_STOP)) {
+ DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->cmd, (int)ippdrv);
+
+ c_node->property = *property;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("failed to search property.\n");
+
+ return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+ if (!cmd_work) {
+ DRM_ERROR("failed to alloc cmd_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+ return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+ struct drm_exynos_ipp_event_work *event_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+ if (!event_work) {
+ DRM_ERROR("failed to alloc event_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+ return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_property *property = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Log the property the user application set; applications can
+ * set various properties.
+ */
+ for_each_ipp_ops(i)
+ ipp_print_property(property, i);
+
+ /*
+ * The set-property ioctl normally generates a new prop_id, but a
+ * prop_id may already have been assigned by an earlier set
+ * property, e.g. in PAUSE state. In that case find the current
+ * prop_id and reuse it instead of allocating a new one.
+ */
+ if (property->prop_id) {
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+ return ipp_find_and_set_property(property);
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node) {
+ DRM_ERROR("failed to allocate map node.\n");
+ return -ENOMEM;
+ }
+
+ /* create property id */
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+ &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+ /* stored property information and ippdrv in private data */
+ c_node->priv = priv;
+ c_node->property = *property;
+ c_node->state = IPP_STATE_IDLE;
+
+ c_node->start_work = ipp_create_cmd_work();
+ if (IS_ERR_OR_NULL(c_node->start_work)) {
+ DRM_ERROR("failed to create start work.\n");
+ ret = PTR_ERR(c_node->start_work);
+ goto err_clear;
+ }
+
+ c_node->stop_work = ipp_create_cmd_work();
+ if (IS_ERR_OR_NULL(c_node->stop_work)) {
+ DRM_ERROR("failed to create stop work.\n");
+ ret = PTR_ERR(c_node->stop_work);
+ goto err_free_start;
+ }
+
+ c_node->event_work = ipp_create_event_work();
+ if (IS_ERR_OR_NULL(c_node->event_work)) {
+ DRM_ERROR("failed to create event work.\n");
+ ret = PTR_ERR(c_node->event_work);
+ goto err_free_stop;
+ }
+
+ mutex_init(&c_node->cmd_lock);
+ mutex_init(&c_node->mem_lock);
+ mutex_init(&c_node->event_lock);
+
+ init_completion(&c_node->start_complete);
+ init_completion(&c_node->stop_complete);
+
+ for_each_ipp_ops(i)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ INIT_LIST_HEAD(&c_node->event_list);
+ list_splice_init(&priv->event_list, &c_node->event_list);
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+ /* make dedicated state without m2m */
+ if (!ipp_is_m2m_cmd(property->cmd))
+ ippdrv->dedicated = true;
+
+ return 0;
+
+err_free_stop:
+ kfree(c_node->stop_work);
+err_free_start:
+ kfree(c_node->start_work);
+err_clear:
+ kfree(c_node);
+ return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* delete list */
+ list_del(&c_node->list);
+
+ /* destroy mutex */
+ mutex_destroy(&c_node->cmd_lock);
+ mutex_destroy(&c_node->mem_lock);
+ mutex_destroy(&c_node->event_lock);
+
+ /* free command node */
+ kfree(c_node->start_work);
+ kfree(c_node->stop_work);
+ kfree(c_node->event_work);
+ kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+ i ? "dst" : "src");
+ continue;
+ }
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+ i ? "dst" : "src", count[i], (int)m_node);
+ count[i]++;
+ }
+ }
+
+ DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+ min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+ max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+ /*
+ * M2M operations need paired src/dst buffers, so the number of
+ * runnable operations is the minimum of the src and dst counts.
+ * Other commands do not use paired memory, so take the maximum.
+ */
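+ /*
+ * Worked example (a sketch): with 3 src buffers and 2 dst buffers
+ * queued, an M2M command can run min(3, 2) = 2 operations, while a
+ * WB/OUTPUT command can consume max(3, 2) = 3 buffers.
+ */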
+ if (ipp_is_m2m_cmd(property->cmd))
+ ret = min(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+ else
+ ret = max(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+ /* source/destination memory list */
+ head = &c_node->mem_list[qbuf->ops_id];
+
+ /* find memory node from memory list */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+ __func__, count++, (int)m_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == qbuf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid queue node.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[m_node->ops_id];
+ if (!ops) {
+ DRM_ERROR("not support ops.\n");
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+ m_node->buf_id, IPP_BUF_ENQUEUE);
+ if (ret) {
+ DRM_ERROR("failed to set addr.\n");
+ goto err_unlock;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_get_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node) {
+ DRM_ERROR("failed to allocate queue node.\n");
+ goto err_unlock;
+ }
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* operations, buffer id */
+ m_node->ops_id = qbuf->ops_id;
+ m_node->prop_id = qbuf->prop_id;
+ m_node->buf_id = qbuf->buf_id;
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+ (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+ qbuf->prop_id, m_node->buf_id);
+
+ for_each_ipp_planar(i) {
+ DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+ i, qbuf->handle[i]);
+
+ /* get dma address by handle */
+ if (qbuf->handle[i]) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ qbuf->handle[i], file);
+ if (IS_ERR(addr)) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ buf_info.handles[i] = qbuf->handle[i];
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+ __func__, i, buf_info.base[i],
+ (int)buf_info.handles[i]);
+ }
+ }
+
+ m_node->filp = file;
+ m_node->buf_info = buf_info;
+ list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+ mutex_unlock(&c_node->mem_lock);
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid dequeue node.\n");
+ return -EFAULT;
+ }
+
+ if (list_empty(&m_node->list)) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* put gem buffer */
+ for_each_ipp_planar(i) {
+ unsigned long handle = m_node->buf_info.handles[i];
+ if (handle)
+ exynos_drm_gem_put_dma_addr(drm_dev, handle,
+ m_node->filp);
+ }
+
+ /* delete list in queue */
+ list_del(&m_node->list);
+ kfree(m_node);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+ qbuf->ops_id, qbuf->buf_id);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+ if (!e) {
+ DRM_ERROR("failed to allocate event.\n");
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ return -ENOMEM;
+ }
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = qbuf->user_data;
+ e->event.prop_id = qbuf->prop_id;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+ list_add_tail(&e->base.link, &c_node->event_list);
+
+ return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e, *te;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+ DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+ __func__, count++, (int)e);
+
+ /*
+ * qbuf == NULL means delete all events: the stop path
+ * wants the whole event list cleared. Otherwise delete
+ * only the event with the matching buffer id.
+ */
+ if (!qbuf) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ }
+
+ /* compare buffer id */
+ if (qbuf && (qbuf->buf_id ==
+ e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ return;
+ }
+ }
+}
+
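+/*
+ * Hand a command work item to the single-threaded command workqueue.
+ * The work runs asynchronously as ipp_sched_cmd(), which starts or
+ * stops the property under c_node->cmd_lock.
+ */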
+void ipp_handle_cmd_work(struct device *dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_work *cmd_work,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ cmd_work->ippdrv = ippdrv;
+ cmd_work->c_node = c_node;
+ queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_property *property;
+ struct exynos_drm_ipp_ops *ops;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EFAULT;
+ }
+
+ ops = ippdrv->ops[qbuf->ops_id];
+ if (!ops) {
+ DRM_ERROR("failed to get ops.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Once the destination buffer is set and the clock is enabled,
+ * M2M operations must be started from queue_buf.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+ cmd_work->ctrl = IPP_CTRL_PLAY;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ } else {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+ /* delete list */
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ }
+ }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_queue_buf *qbuf = data;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!qbuf) {
+ DRM_ERROR("invalid buf parameter.\n");
+ return -EINVAL;
+ }
+
+ if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+ DRM_ERROR("invalid ops parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+ __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+ qbuf->buf_id, qbuf->buf_type);
+
+ /* find command node */
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ qbuf->prop_id);
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return -EFAULT;
+ }
+
+ /* buffer control */
+ switch (qbuf->buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* get memory node */
+ m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+ if (IS_ERR(m_node)) {
+ DRM_ERROR("failed to get m_node.\n");
+ return PTR_ERR(m_node);
+ }
+
+ /*
+ * First get an event for the destination buffer; then,
+ * in the M2M case, run with the destination buffer if
+ * needed.
+ */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+ /* get event for destination buffer */
+ ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to get event.\n");
+ goto err_clean_node;
+ }
+
+ /*
+ * In the M2M case, run play control to stream;
+ * otherwise just set the address and wait.
+ */
+ ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to run command.\n");
+ goto err_clean_node;
+ }
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ mutex_lock(&c_node->cmd_lock);
+
+ /* put event for destination buffer */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+ ipp_put_event(c_node, qbuf);
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+ mutex_unlock(&c_node->cmd_lock);
+ break;
+ default:
+ DRM_ERROR("invalid buffer control.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_clean_node:
+ DRM_ERROR("clean memory nodes.\n");
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+ return ret;
+}
+
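+/*
+ * Allowed state transitions checked below (a summary of the switch):
+ *
+ *	PLAY:   IDLE  -> START
+ *	STOP:   any state except STOP
+ *	PAUSE:  START -> STOP
+ *	RESUME: STOP  -> START
+ *
+ * Any control other than PLAY additionally requires the device not
+ * to be runtime suspended.
+ */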
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+ enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (ctrl != IPP_CTRL_PLAY) {
+ if (pm_runtime_suspended(dev)) {
+ DRM_ERROR("pm:runtime_suspended.\n");
+ goto err_status;
+ }
+ }
+
+ switch (ctrl) {
+ case IPP_CTRL_PLAY:
+ if (state != IPP_STATE_IDLE)
+ goto err_status;
+ break;
+ case IPP_CTRL_STOP:
+ if (state == IPP_STATE_STOP)
+ goto err_status;
+ break;
+ case IPP_CTRL_PAUSE:
+ if (state != IPP_STATE_START)
+ goto err_status;
+ break;
+ case IPP_CTRL_RESUME:
+ if (state != IPP_STATE_STOP)
+ goto err_status;
+ break;
+ default:
+ DRM_ERROR("invalid state.\n");
+ goto err_status;
+ }
+
+ return true;
+
+err_status:
+ DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+ return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!cmd_ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return PTR_ERR(ippdrv);
+ }
+
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ cmd_ctrl->prop_id);
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return -EINVAL;
+ }
+
+ if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+ c_node->state)) {
+ DRM_ERROR("invalid state.\n");
+ return -EINVAL;
+ }
+
+ switch (cmd_ctrl->ctrl) {
+ case IPP_CTRL_PLAY:
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+ c_node->state = IPP_STATE_START;
+
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ case IPP_CTRL_STOP:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(300))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ break;
+ case IPP_CTRL_PAUSE:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ break;
+ case IPP_CTRL_RESUME:
+ c_node->state = IPP_STATE_START;
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ default:
+ DRM_ERROR("could not support this state currently.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ bool swap = false;
+ int ret, i;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for_each_ipp_ops(i) {
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+ DRM_ERROR("not support ops and config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+ DRM_ERROR("not support format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ ret = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip, &swap);
+ if (ret) {
+ DRM_ERROR("not support tranf.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+ DRM_ERROR("not support size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* store command info in ippdrv */
+ ippdrv->cmd = c_node;
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* set current property in ippdrv */
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->cmd = NULL;
+ return ret;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("failed to get node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+ __func__, (int)m_node);
+
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* put event */
+ ipp_put_event(c_node, NULL);
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+ __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+err_clear:
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work =
+ (struct drm_exynos_ipp_cmd_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = cmd_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("invalid ippdrv list.\n");
+ return;
+ }
+
+ c_node = cmd_work->c_node;
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ mutex_lock(&c_node->cmd_lock);
+
+ property = &c_node->property;
+
+ switch (cmd_work->ctrl) {
+ case IPP_CTRL_PLAY:
+ case IPP_CTRL_RESUME:
+ ret = ipp_start_property(ippdrv, c_node);
+ if (ret) {
+ DRM_ERROR("failed to start property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ /*
+ * The M2M case waits for completion of the transfer:
+ * it performs one operation at a time against multiple
+ * queued buffers, so each data transfer must finish
+ * before the next one starts.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ if (!wait_for_completion_timeout
+ (&c_node->start_complete, msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout event:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CTRL_STOP:
+ case IPP_CTRL_PAUSE:
+ ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+ c_node);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_unlock;
+ }
+
+ complete(&c_node->stop_complete);
+ break;
+ default:
+ DRM_ERROR("unknown control type\n");
+ break;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+ mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_queue_buf qbuf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ for_each_ipp_ops(i)
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", buf_id[i]);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[i] = m_node->buf_id;
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", tbuf_id[i]);
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&qbuf, 0x0, sizeof(qbuf));
+ qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+ qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &qbuf);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+ DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+ tbuf_id[1], buf_id[1], property->prop_id);
+
+ /*
+ * The command node keeps an event list for destination buffers.
+ * When a destination buffer is enqueued on the memory list, an
+ * event is created and linked to the tail of the event list, so
+ * the first event corresponds to the first enqueued buffer.
+ */
+ e = list_first_entry(&c_node->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ if (!e) {
+ DRM_ERROR("empty event.\n");
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&now);
+ DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+ , __func__, now.tv_sec, now.tv_usec);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set buffer id about source destination */
+ for_each_ipp_ops(i)
+ e->event.buf_id[i] = tbuf_id[i];
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+ property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+ return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+ struct drm_exynos_ipp_event_work *event_work =
+ (struct drm_exynos_ipp_event_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret;
+
+ if (!event_work) {
+ DRM_ERROR("failed to get event_work.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+ event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+ ippdrv = event_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return;
+ }
+
+ c_node = ippdrv->cmd;
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return;
+ }
+
+ /*
+ * IPP synchronizes the command thread with the event thread.
+ * If userland closes the device immediately, synchronize with
+ * the command thread by completing the event rather than
+ * carrying out the operation. Take event_lock before the state
+ * check so the unlock at err_completion is always balanced.
+ */
+ mutex_lock(&c_node->event_lock);
+
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+ __func__, c_node->state, c_node->property.prop_id);
+ goto err_completion;
+ }
+
+ ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+ if (ret) {
+ DRM_ERROR("failed to send event.\n");
+ goto err_completion;
+ }
+
+err_completion:
+ if (ipp_is_m2m_cmd(c_node->property.cmd))
+ complete(&c_node->start_complete);
+
+ mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret, count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ ippdrv->drm_dev = drm_dev;
+
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+ &ippdrv->ipp_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_idr;
+ }
+
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+ count++, (int)ippdrv, ippdrv->ipp_id);
+
+ if (ippdrv->ipp_id == 0) {
+ DRM_ERROR("failed to get ipp_id[%d]\n",
+ ippdrv->ipp_id);
+ ret = -EINVAL;
+ goto err_idr;
+ }
+
+ /* store parent device for node */
+ ippdrv->parent_dev = dev;
+
+ /* store event work queue and handler */
+ ippdrv->event_workq = ctx->event_workq;
+ ippdrv->sched_event = ipp_sched_event;
+ INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+ if (is_drm_iommu_supported(drm_dev)) {
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err_iommu;
+ }
+ }
+ }
+
+ return 0;
+
+err_iommu:
+ /* get ipp driver entry */
+ list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+ return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+ ippdrv->drm_dev = NULL;
+ exynos_drm_ippdrv_unregister(ippdrv);
+ }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate priv.\n");
+ return -ENOMEM;
+ }
+ priv->dev = dev;
+ file_priv->ipp_priv = priv;
+
+ INIT_LIST_HEAD(&priv->event_list);
+
+ DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+ return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ goto err_clear;
+ }
+
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (list_empty(&ippdrv->cmd_list))
+ continue;
+
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+ __func__, count++, (int)ippdrv);
+
+ if (c_node->priv == priv) {
+ /*
+ * Userland hit an abnormal state: the process
+ * was killed and the file closed, so the stop
+ * cmd_ctrl was never issued. Perform the stop
+ * operation here instead.
+ */
+ if (c_node->state == IPP_STATE_START) {
+ ipp_stop_property(drm_dev, ippdrv,
+ c_node);
+ c_node->state = IPP_STATE_STOP;
+ }
+
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+ }
+ }
+
+err_clear:
+ kfree(priv);
+ return;
+}
+
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipp_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_init(&ctx->ipp_lock);
+ mutex_init(&ctx->prop_lock);
+
+ idr_init(&ctx->ipp_idr);
+ idr_init(&ctx->prop_idr);
+
+ /*
+ * Create a single-threaded workqueue for IPP events.
+ * IPP drivers queue their event_work here, and the event
+ * thread delivers the resulting events to the user process.
+ */
+ ctx->event_workq = create_singlethread_workqueue("ipp_event");
+ if (!ctx->event_workq) {
+ dev_err(dev, "failed to create event workqueue\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+ /*
+ * Create a single-threaded workqueue for IPP commands.
+ * A user process creates a command node via the set_property
+ * ioctl, builds a start_work and queues it here; the command
+ * thread then starts the property.
+ */
+ ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+ if (!ctx->cmd_workq) {
+ dev_err(dev, "failed to create cmd workqueue\n");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ /* set sub driver information */
+ subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = ipp_subdrv_probe;
+ subdrv->remove = ipp_subdrv_remove;
+ subdrv->open = ipp_subdrv_open;
+ subdrv->close = ipp_subdrv_close;
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm ipp device.\n");
+ goto err_cmd_workq;
+ }
+
+ dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+ return 0;
+
+err_cmd_workq:
+ destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+ destroy_workqueue(ctx->event_workq);
+err_clear:
+ kfree(ctx);
+ return ret;
+}
+
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+ struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* unregister sub driver */
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+ /* remove,destroy ipp idr */
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+
+ mutex_destroy(&ctx->ipp_lock);
+ mutex_destroy(&ctx->prop_lock);
+
+ /* destroy command, event work queue */
+ destroy_workqueue(ctx->cmd_workq);
+ destroy_workqueue(ctx->event_workq);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return ipp_power_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+ SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+ .probe = ipp_probe,
+ .remove = __devexit_p(ipp_remove),
+ .driver = {
+ .name = "exynos-drm-ipp",
+ .owner = THIS_MODULE,
+ .pm = &ipp_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..28ffac95386c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
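+
+/*
+ * Usage sketch: both helpers iterate a plain loop counter over the
+ * src/dst operations or the Y/Cb/Cr planes, e.g.
+ *
+ *	int i;
+ *
+ *	for_each_ipp_ops(i)
+ *		INIT_LIST_HEAD(&c_node->mem_list[i]);
+ */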
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+ IPP_STATE_IDLE,
+ IPP_STATE_START,
+ IPP_STATE_STOP,
+};
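+
+/*
+ * A command node moves IDLE -> START on PLAY, START -> STOP on
+ * STOP/PAUSE, and back to START on RESUME; see
+ * exynos_drm_ipp_cmd_ctrl() and exynos_drm_ipp_check_valid().
+ */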
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source,destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct exynos_drm_ipp_private *priv;
+ struct list_head list;
+ struct list_head event_list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct mutex cmd_lock;
+ struct mutex mem_lock;
+ struct mutex event_lock;
+ struct completion start_complete;
+ struct completion stop_complete;
+ struct drm_exynos_ipp_property property;
+ struct drm_exynos_ipp_cmd_work *start_work;
+ struct drm_exynos_ipp_cmd_work *stop_work;
+ struct drm_exynos_ipp_event_work *event_work;
+ enum drm_exynos_ipp_state state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr each gem object handle.
+ * @base: Y, Cb, Cr each planar address.
+ */
+struct drm_exynos_ipp_buf_info {
+ unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+ __u32 enable;
+ __u32 refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source,destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type);
+};
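+
+/*
+ * Example wiring (taken from the rotator driver below): a driver
+ * fills one ops table per direction and hooks them into
+ * ippdrv->ops[]:
+ *
+ *	static struct exynos_drm_ipp_ops rot_src_ops = {
+ *		.set_fmt	= rotator_src_set_fmt,
+ *		.set_size	= rotator_src_set_size,
+ *		.set_addr	= rotator_src_set_addr,
+ *	};
+ */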
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head drv_list;
+ struct device *parent_dev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ u32 ipp_id;
+ bool dedicated;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct workqueue_struct *event_workq;
+ struct drm_exynos_ipp_cmd_node *cmd;
+ struct list_head cmd_list;
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*sched_event)(struct work_struct *work);
+};
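+
+/*
+ * A hardware driver (fimc/gsc/rotator) typically fills this struct
+ * in its probe routine and then registers it; a sketch, using names
+ * from the rotator driver in this patch:
+ *
+ *	rot->ippdrv.dev = dev;
+ *	rot->ippdrv.ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ *	rot->ippdrv.ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ *	exynos_drm_ippdrv_register(&rot->ippdrv);
+ */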
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 60b877a388c2..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
* CRTC ----------------
* ^ start ^ end
*
- * There are six cases from a to b.
+ * There are six cases from a to f.
*
* <----- SCREEN ----->
* 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
}
overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
+ DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->dma_addr[i]);
}
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_x < 0) {
if (actual_w)
src_x -= crtc_x;
- else
- src_x += crtc_w;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y -= crtc_y;
- else
- src_y += crtc_h;
crtc_y = 0;
}
@@ -204,7 +198,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
plane->crtc = crtc;
- plane->fb = crtc->fb;
exynos_plane_commit(plane);
exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..1c2366083c70
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * The rotator supports image crop/rotation and input/output DMA
+ * operations: input DMA reads image data from memory and output DMA
+ * writes image data back to memory.
+ *
+ * M2M operation: supports crop/scale/rotation/csc and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check the suspend/resume API if needed.
+ * 2. check the platform_device_id use case.
+ * 3. check src/dst size width, height.
+ * 4. add the supported format list to prop_list.
+ */
+
+#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct rot_context, ippdrv)
+#define rot_read(offset) readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
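+
+/*
+ * Usage sketch, e.g. enabling the rotator interrupt:
+ *
+ *	u32 val = rot_read(ROT_CONFIG);
+ *	val |= ROT_CONFIG_IRQ;
+ *	rot_write(val, ROT_CONFIG);
+ */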
+
+enum rot_irq_status {
+ ROT_IRQ_STATUS_COMPLETE = 8,
+ ROT_IRQ_STATUS_ILLEGAL = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+ u32 min_w;
+ u32 min_h;
+ u32 max_w;
+ u32 max_h;
+ u32 align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+ struct rot_limit ycbcr420_2p;
+ struct rot_limit rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct clk *clock;
+ struct rot_limit_table *limit_tbl;
+ int irq;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
+ bool suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+ u32 val = rot_read(ROT_CONFIG);
+
+ if (enable)
+ val |= ROT_CONFIG_IRQ;
+ else
+ val &= ~ROT_CONFIG_IRQ;
+
+ rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_CONTROL);
+
+ val &= ROT_CONTROL_FMT_MASK;
+
+ return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_STATUS);
+
+ val = ROT_STATUS_IRQ(val);
+
+ if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+ return ROT_IRQ_STATUS_COMPLETE;
+
+ return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+ enum rot_irq_status irq_status;
+ u32 val;
+
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+
+ /* clear status */
+ val = rot_read(ROT_STATUS);
+ val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+ rot_write(val, ROT_STATUS);
+
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ } else {
+ DRM_ERROR("the SFR is set illegally\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
+{
+ struct rot_limit_table *limit_tbl = rot->limit_tbl;
+ struct rot_limit *limit;
+ u32 mask, val;
+
+ /* Get size limit */
+ if (fmt == ROT_CONTROL_FMT_RGB888)
+ limit = &limit_tbl->rgb888;
+ else
+ limit = &limit_tbl->ycbcr420_2p;
+
+ /* Get mask for rounding to nearest aligned val */
+ mask = ~((1 << limit->align) - 1);
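+ /* e.g. align = 4 -> mask = ~0xf, so sizes round to multiples of 16 */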
+
+ /* Set aligned width */
+ val = ROT_ALIGN(*hsize, limit->align, mask);
+ if (val < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (val > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = val;
+
+ /* Set aligned height */
+ val = ROT_ALIGN(*vsize, limit->align, mask);
+ if (val < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (val > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = val;
+}
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FMT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ val |= ROT_CONTROL_FMT_YCBCR420_2P;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= ROT_CONTROL_FMT_RGB888;
+ break;
+ default:
+ DRM_ERROR("invalid image format\n");
+ return -EINVAL;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+ (fmt == ROT_CONTROL_FMT_RGB888))
+ return true;
+
+ return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
+ u32 val;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_SRC_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_SRC_CROP_POS);
+ val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+ rot_write(val, ROT_SRC_CROP_SIZE);
+
+ return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+
+ val = rot_read(ROT_SRC_BUF_SIZE);
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
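+ /*
+ * The fallback above assumes the contiguous NV12 layout, e.g. for
+ * a 1280x720 buffer the CbCr plane starts at Y base + 1280 * 720.
+ */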
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ /* Set transform configuration */
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FLIP_MASK;
+
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ val |= ROT_CONTROL_FLIP_VERTICAL;
+ break;
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
+ break;
+ default:
+ /* Flip None */
+ break;
+ }
+
+ val &= ~ROT_CONTROL_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ val |= ROT_CONTROL_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ val |= ROT_CONTROL_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ val |= ROT_CONTROL_ROT_270;
+ break;
+ default:
+ /* Rotation 0 Degree */
+ break;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ *swap = true;
+ else
+ *swap = false;
+
+ return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val, fmt, hsize, vsize;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_DST_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_DST_CROP_POS);
+
+ return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+ /* Get buf size */
+ val = rot_read(ROT_DST_BUF_SIZE);
+
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_DST_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 0;
+ prop_list->crop = 0;
+ prop_list->scale = 0;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
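prop_list encodes the supported flips and degrees as bitmasks indexed by the enum values, so a consumer tests a capability with a shift and mask. A sketch of such a test (hypothetical helper; the enum is collapsed to a plain integer):

#include <stdbool.h>
#include <stdint.h>

/* True if the advertised degree bitmask contains the given enum value. */
static bool rot_supports_degree(uint32_t degree_mask, unsigned int degree)
{
	return (degree_mask & (1u << degree)) != 0;
}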
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:not support format\n", __func__);
+ return false;
+ }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+ DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_fmt(dst_config->fmt)) {
+ DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+ DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ /* fall through */
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+ return -EINVAL;
+ }
+
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+ DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_flip(dst_config->flip)) {
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+ return -EINVAL;
+ }
+
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
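With swap set, the destination crop is validated crosswise: its width is laid out along the buffer's vertical axis and its height along the horizontal one, which is why pos->h is compared against vsize and pos->w against hsize. A condensed sketch of the same bounds test (hypothetical types mirroring drm_exynos_pos and drm_exynos_sz):

#include <stdbool.h>

struct rect { unsigned int x, y, w, h; };
struct size { unsigned int hsize, vsize; };

/* Bounds test for the destination crop, with axes exchanged when the
 * job rotates by 90 or 270 degrees. */
static bool dst_fits(const struct rect *p, const struct size *s, bool swap)
{
	if (swap)
		return p->x + p->h <= s->vsize && p->y + p->w <= s->hsize;
	return p->x + p->w <= s->hsize && p->y + p->h <= s->vsize;
}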
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
+
+ if (cmd != IPP_CMD_M2M) {
+ DRM_ERROR("not support cmd: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ val = rot_read(ROT_CONTROL);
+ val |= ROT_CONTROL_START;
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+ if (!rot) {
+ dev_err(dev, "failed to allocate rot\n");
+ return -ENOMEM;
+ }
+
+ rot->limit_tbl = (struct rot_limit_table *)
+ platform_get_device_id(pdev)->driver_data;
+
+ rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rot->regs_res) {
+ dev_err(dev, "failed to find registers\n");
+ ret = -ENOENT;
+ goto err_get_resource;
+ }
+
+ rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+ if (!rot->regs) {
+ dev_err(dev, "failed to map register\n");
+ ret = -ENXIO;
+ goto err_get_resource;
+ }
+
+ rot->irq = platform_get_irq(pdev, 0);
+ if (rot->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = rot->irq;
+ goto err_get_irq;
+ }
+
+ ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+ IRQF_ONESHOT, "drm_rotator", rot);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_get_irq;
+ }
+
+ rot->clock = clk_get(dev, "rotator");
+ if (IS_ERR_OR_NULL(rot->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(rot->clock);
+ goto err_clk_get;
+ }
+
+ pm_runtime_enable(dev);
+
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
+ ret = rotator_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_ippdrv_register;
+ }
+
+ DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+ platform_set_drvdata(pdev, rot);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm rotator device\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "The exynos rotator is probed successfully\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+ clk_put(rot->clock);
+err_clk_get:
+ free_irq(rot->irq, rot);
+err_get_irq:
+ devm_iounmap(dev, rot->regs);
+err_get_resource:
+ devm_kfree(dev, rot);
+ return ret;
+}
+
+static int __devexit rotator_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ pm_runtime_disable(dev);
+ clk_put(rot->clock);
+
+ free_irq(rot->irq, rot);
+ devm_iounmap(dev, rot->regs);
+
+ devm_kfree(dev, rot);
+
+ return 0;
+}
+
+struct rot_limit_table rot_limit_tbl = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
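.align here appears to be a power-of-two shift rather than a byte count: rotator_align_size() (earlier in this file) builds its rounding mask from it, so 3 constrains NV12 dimensions to multiples of 8 and 2 constrains RGB888 to multiples of 4. A sketch of that rounding, assuming the driver rounds to the nearest aligned value (align >= 1):

#include <stdint.h>

/* Round v to the nearest multiple of 2^align by adding half the step
 * and masking off the low bits. */
static uint32_t align_pow2_nearest(uint32_t v, unsigned int align)
{
	uint32_t mask = ~((1u << align) - 1);

	return (v + (1u << (align - 1))) & mask;
}

For example, align_pow2_nearest(1919, 3) yields 1920.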
+
+struct platform_device_id rotator_driver_ids[] = {
+ {
+ .name = "exynos-rot",
+ .driver_data = (unsigned long)&rot_limit_tbl,
+ },
+ {},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (enable) {
+ clk_enable(rot->clock);
+ rot->suspended = false;
+ } else {
+ clk_disable(rot->clock);
+ rot->suspended = true;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return rotator_clk_ctrl(rot, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+ NULL)
+};
+
+struct platform_driver rotator_driver = {
+ .probe = rotator_probe,
+ .remove = __devexit_p(rotator_remove),
+ .id_table = rotator_driver_ids,
+ .driver = {
+ .name = "exynos-rot",
+ .owner = THIS_MODULE,
+ .pm = &rotator_pm_ops,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..a2d7a14a52b6
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_ROTATOR_H_
+#define _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..99bfc38dfaa2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
-
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..2c46b6c0b82c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION 0x02
+#define HDMI_AVI_LENGTH 0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION 0x01
+#define HDMI_AUI_LENGTH 0x0A
+
+/* HDMI infoframe types used to configure the HDMI output packet header (AUI and AVI) */
+enum HDMI_PACKET_TYPE {
+ /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+ /* InfoFrame packet type */
+ HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+ /* Vendor-Specific InfoFrame */
+ HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+ /* Auxiliary Video information InfoFrame */
+ HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+ /* Audio information InfoFrame */
+ HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
+ void *parent_ctx;
int external_irq;
int internal_irq;
@@ -84,7 +108,6 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
- void *parent_ctx;
int hpd_gpio;
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
+ { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+ &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
};
/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+ { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+ { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+ { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
+struct hdmi_infoframe {
+ enum HDMI_PACKET_TYPE type;
+ u8 ver;
+ u8 len;
+};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+ u32 start, u8 len, u32 hdr_sum)
+{
+ int i;
+
+ /*
+ * hdr_sum : header0 + header1 + header2
+ * start : start address of packet byte1
+ * len : packet bytes - 1
+ */
+ for (i = 0; i < len; ++i)
+ hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+ /* return 2's complement of 8 bit hdr_sum */
+ return (u8)(~(hdr_sum & 0xff) + 1);
+}
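This is the usual CEA-861 InfoFrame checksum: the checksum byte is the two's complement of the byte sum of the three header bytes plus the payload, so the complete packet sums to zero modulo 256. A self-contained sketch over an in-memory payload, with the register-read indirection dropped:

#include <stddef.h>
#include <stdint.h>

/* InfoFrame checksum: chosen so header + payload + checksum == 0 (mod 256). */
static uint8_t infoframe_chksum(uint8_t type, uint8_t ver, uint8_t len,
				const uint8_t *payload)
{
	uint32_t sum = type + ver + len;
	size_t i;

	for (i = 0; i < len; i++)
		sum += payload[i];

	return (uint8_t)(~sum + 1);	/* two's complement of the low byte */
}

A receiver validates the packet by summing every byte, checksum included, and checking that the low byte of the result is zero.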
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+ struct hdmi_infoframe *infoframe)
+{
+ u32 hdr_sum;
+ u8 chksum;
+ u32 aspect_ratio;
+ u32 mod;
+ u32 vic;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+ if (hdata->dvi_mode) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+ HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+ return;
+ }
+
+ switch (infoframe->type) {
+ case HDMI_PACKET_TYPE_AVI:
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+ /* Output format hardcoded to zero (RGB/YCbCr selection) */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+ AVI_ACTIVE_FORMAT_VALID |
+ AVI_UNDERSCANNED_DISPLAY_VALID);
+
+ aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+ if (hdata->type == HDMI_TYPE13)
+ vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+ else
+ vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+ chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+ break;
+ case HDMI_PACKET_TYPE_AUI:
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+ break;
+ default:
+ break;
+ }
+}
+
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
raw_edid->width_cm, raw_edid->height_cm);
+ kfree(raw_edid);
} else {
return -ENODEV;
}
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
+ struct hdmi_infoframe infoframe;
+
/* disable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
+ infoframe.type = HDMI_PACKET_TYPE_AVI;
+ infoframe.ver = HDMI_AVI_VERSION;
+ infoframe.len = HDMI_AVI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ infoframe.type = HDMI_PACKET_TYPE_AUI;
+ infoframe.ver = HDMI_AUI_VERSION;
+ infoframe.len = HDMI_AUI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
/* enable AVI packet every vsync, fixes purple line problem */
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
mdelay(10);
}
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
+ struct drm_mode_object base;
+ struct list_head head;
+
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+
+ /* preserve display mode header while copying. */
+ head = adjusted_mode->head;
+ base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
+ adjusted_mode->head = head;
+ adjusted_mode->base = base;
break;
}
}
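Copying a whole struct drm_display_mode with memcpy() would also overwrite the bookkeeping members embedded in it: the base mode object and the list_head that link adjusted_mode into the connector's mode list belong to the destination, not to the mode being copied from. Saving and restoring them around the copy keeps that linkage intact. The pattern in miniature, on a simplified struct:

#include <string.h>

struct list_head { struct list_head *next, *prev; };

struct mode {
	struct list_head head;	/* links this mode into a list */
	int hdisplay, vdisplay, clock;
};

/* Copy src's payload into dst while preserving dst's list linkage. */
static void mode_copy_keep_links(struct mode *dst, const struct mode *src)
{
	struct list_head head = dst->head;

	memcpy(dst, src, sizeof(*dst));
	dst->head = head;
}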
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
mutex_unlock(&hdata->hdmi_mutex);
- pm_runtime_get_sync(hdata->dev);
-
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
+
+ hdmiphy_poweron(hdata);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
* its reset state seems to meet the condition.
*/
hdmiphy_conf_reset(hdata);
+ hdmiphy_poweroff(hdata);
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- pm_runtime_put_sync(hdata->dev);
-
mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
+ if (pm_runtime_suspended(hdata->dev))
+ pm_runtime_get_sync(hdata->dev);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- hdmi_poweroff(hdata);
+ if (!pm_runtime_suspended(hdata->dev))
+ pm_runtime_put_sync(hdata->dev);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
- res->hdmi = clk_get(dev, "hdmi");
+ res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
- res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
- res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
- res->hdmiphy = clk_get(dev, "hdmiphy");
+ res->hdmiphy = devm_clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
- ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
@@ -2217,28 +2373,6 @@ fail:
return -ENODEV;
}
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- regulator_bulk_free(res->regul_count, res->regul_bulk);
- /* kfree is NULL-safe */
- kfree(res->regul_bulk);
- if (!IS_ERR_OR_NULL(res->hdmiphy))
- clk_put(res->hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
- clk_put(res->sclk_hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_pixel))
- clk_put(res->sclk_pixel);
- if (!IS_ERR_OR_NULL(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(res->hdmi))
- clk_put(res->hdmi);
- memset(res, 0, sizeof(*res));
-
- return 0;
-}
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
}
};
+#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+#endif
static int __devinit hdmi_probe(struct platform_device *pdev)
{
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
pdev->dev.of_node);
+ if (match == NULL)
+ return -ENODEV;
hdata->type = (enum hdmi_type)match->data;
} else {
hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
- ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
- goto err_data;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
+ return -ENOENT;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
- ret = -ENXIO;
- goto err_resource;
+ return -ENXIO;
}
- ret = gpio_request(hdata->hpd_gpio, "HPD");
+ ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- goto err_resource;
+ return ret;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
- ret = -ENOENT;
- goto err_gpio;
+ return -ENOENT;
}
hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
-err_gpio:
- gpio_free(hdata->hpd_gpio);
-err_resource:
- hdmi_resources_cleanup(hdata);
-err_data:
return ret;
}
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
free_irq(hdata->internal_irq, hdata);
free_irq(hdata->external_irq, hdata);
- gpio_free(hdata->hpd_gpio);
-
- hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
disable_irq(hdata->internal_irq);
disable_irq(hdata->external_irq);
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
hdmi_poweroff(hdata);
return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
enable_irq(hdata->external_irq);
enable_irq(hdata->internal_irq);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ hdmi_poweron(hdata);
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweron(hdata);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+ SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = hdmi_match_types,
+ .of_match_table = of_match_ptr(hdmi_match_types),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..6206056f4a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = hdmiphy_match_types,
+ .of_match_table = of_match_ptr(hdmiphy_match_types),
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..21db89530fc7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,14 +36,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
dma_addr_t dma_addr;
- void __iomem *vaddr;
dma_addr_t chroma_dma_addr;
- void __iomem *chroma_vaddr;
uint32_t pixel_format;
unsigned int bpp;
unsigned int crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
+ bool enabled;
+ bool resume;
};
struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
+ struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
@@ -90,6 +92,9 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
+ void *parent_ctx;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
};
struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static void mixer_poweron(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
- return;
- }
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
-
- pm_runtime_get_sync(ctx->dev);
-
- clk_enable(res->mixer);
- if (ctx->vp_enabled) {
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
- }
-
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
- mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *mdata = ctx;
+ struct drm_device *drm_dev;
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
- mutex_unlock(&ctx->mixer_mutex);
+ drm_hdmi_ctx = mdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ if (is_drm_iommu_supported(drm_dev)) {
+ if (enable)
+ return drm_iommu_attach_device(drm_dev, mdata->dev);
- clk_disable(res->mixer);
- if (ctx->vp_enabled) {
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
+ drm_iommu_detach_device(drm_dev, mdata->dev);
}
-
- pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
+ return 0;
}
static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_dpms(void *ctx, int mode)
-{
- struct mixer_context *mixer_ctx = ctx;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- mixer_poweron(mixer_ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- mixer_poweroff(mixer_ctx);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
- struct mixer_context *mixer_ctx = ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
- int ret;
-
- ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
- MXR_INT_STATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
win_data = &mixer_ctx->win_data[win];
win_data->dma_addr = overlay->dma_addr[0];
- win_data->vaddr = overlay->vaddr[0];
win_data->chroma_dma_addr = overlay->dma_addr[1];
- win_data->chroma_vaddr = overlay->vaddr[1];
win_data->pixel_format = overlay->pixel_format;
win_data->bpp = overlay->bpp;
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
+
+ mixer_ctx->win_data[win].enabled = true;
}
static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ mixer_ctx->win_data[win].resume = false;
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for MIXER to signal VSYNC interrupt or return after
+ * timeout, which is set to 50ms (i.e. a 20 Hz refresh rate).
+ */
+ if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+ !atomic_read(&mixer_ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
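The wait pairs an atomic flag with a wait queue: the waiter raises wait_vsync_event and sleeps; the mixer IRQ handler (further down in this patch) clears the flag and wakes the queue, and the 50 ms timeout bounds the sleep if no vsync ever arrives. The same handshake sketched in user space with a condition variable (hypothetical names throughout):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool vsync_pending;

/* Waiter: raise the flag, then sleep until it is cleared or 50 ms pass. */
static void wait_for_vsync(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 50 * 1000 * 1000;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	vsync_pending = true;
	while (vsync_pending &&
	       pthread_cond_timedwait(&cond, &lock, &deadline) == 0)
		;	/* woken: re-check the flag */
	pthread_mutex_unlock(&lock);
}

/* "Interrupt" side: clear the flag and wake the waiter. */
static void vsync_signal(void)
{
	pthread_mutex_lock(&lock);
	vsync_pending = false;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}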
+
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ mixer_win_disable(ctx, i);
+ }
+ mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ clk_enable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ }
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+
+ mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_window_suspend(ctx);
+
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ clk_disable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+ }
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+
+out:
+ mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_get_sync(mixer_ctx->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (!pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_put_sync(mixer_ctx->dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
}
static struct exynos_mixer_ops mixer_ops = {
/* manager */
+ .iommu_on = mixer_iommu_on,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
.dpms = mixer_dpms,
/* overlay */
- .wait_for_vblank = mixer_wait_for_vblank,
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_vblank_put(drm_dev, crtc);
}
- if (is_checked)
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
}
out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
- mixer_res->mixer = clk_get(dev, "mixer");
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- goto fail;
+ return ret;
}
mixer_res->irq = res->start;
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
- clk_put(mixer_res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(mixer_res->mixer))
- clk_put(mixer_res->mixer);
- return ret;
}
static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- int ret;
- mixer_res->vp = clk_get(dev, "vp");
+ mixer_res->vp = devm_clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
- clk_put(mixer_res->sclk_dac);
- if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
- clk_put(mixer_res->sclk_mixer);
- if (!IS_ERR_OR_NULL(mixer_res->vp))
- clk_put(mixer_res->vp);
- return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
+ ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
mixer_poweroff(ctx);
return 0;
}
+
+static int mixer_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
#endif
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+ SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
struct platform_driver mixer_driver = {
.driver = {
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21 (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22 (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23 (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31 (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32 (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21 (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22 (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23 (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31 (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32 (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21	(0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22	(0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23 (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31	(0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32 (0x34c)
+
+/*
+ * Macro part
+ */
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default PingPong Memory for input DMA */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
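
Taken together, these selectors hide the split register layout from callers: a zero-based buffer index 0..3 resolves to the 1st..4th start-address registers defined earlier in this header, while 4..31 continues into the 5th..32nd bank above. A minimal sketch of programming the three output planes for one buffer, assuming <linux/io.h> and a mapped register base (the function name, `regs`, and the DMA addresses are hypothetical, not taken from this patch):

	static void fimc_set_output_addr(void __iomem *regs, int buf_id,
					 u32 y, u32 cb, u32 cr)
	{
		/* buf_id 0..3 -> CIOYSA1..4, buf_id 4..31 -> CIOYSA5..32 */
		writel(y, regs + EXYNOS_CIOYSA(buf_id));
		writel(cb, regs + EXYNOS_CIOCBSA(buf_id));
		writel(cr, regs + EXYNOS_CIOCRSA(buf_id));
	}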
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
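
These GET helpers decode individual fields out of a raw status read; a hedged sketch of polling for frame completion (the EXYNOS_CISTATUS register offset is defined earlier in this header; the function name and `regs` are assumptions):

	static bool fimc_frame_done(void __iomem *regs)
	{
		u32 sts = readl(regs + EXYNOS_CISTATUS);

		return EXYNOS_CISTATUS_GET_FRAME_END(sts) != 0;
	}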
+
+#define EXYNOS_CIIMGEFF_FIN(x)			(((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
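
The _EXT macros carry the overflow bits that do not fit into CITRGFMT and CISCCTRL: for example, a target width of 0x2000 (8192) has only bit 13 set, so EXYNOS_CITRGFMT_TARGETHSIZE(0x2000) contributes 0 while EXYNOS_CIEXTEN_TARGETH_EXT(0x2000) evaluates to (1 << 26), parking the 14th size bit in the extension register.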
+
+/*
+ * Bit definition part
+ */
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK	(0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
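
The base-address arrays are plain arithmetic over the ping-pong slot number, so filling one input slot reduces to three writes; a hedged sketch (the function name and `regs` are assumptions, not from this patch):

	static void gsc_set_in_addr(void __iomem *regs, int slot,
				    u32 y, u32 cb, u32 cr)
	{
		writel(y, regs + GSC_IN_BASE_ADDR_Y(slot));	/* 0x50 + slot * 4 */
		writel(cb, regs + GSC_IN_BASE_ADDR_CB(slot));	/* 0x80 + slot * 4 */
		writel(cr, regs + GSC_IN_BASE_ADDR_CR(slot));	/* 0xB0 + slot * 4 */
	}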
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
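
Both coefficient tables are indexed by three parameters with fixed strides, e.g. GSC_HCOEF(2, 1, 0) = 0x300 + 2 * 0x4 + 1 * 0x30 + 0 * 0x300 = 0x338.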
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x)		((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN (1 << 0)
+
/* Video related registers */
#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * ((n) - 1))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * ((n) - 1))
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG 0x00
+#define ROT_CONFIG_IRQ (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL 0x10
+#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
+#define ROT_CONTROL_FMT_RGB888 (6 << 8)
+#define ROT_CONTROL_FMT_MASK (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
+#define ROT_CONTROL_FLIP_MASK (3 << 6)
+#define ROT_CONTROL_ROT_90 (1 << 4)
+#define ROT_CONTROL_ROT_180 (2 << 4)
+#define ROT_CONTROL_ROT_270 (3 << 4)
+#define ROT_CONTROL_ROT_MASK (3 << 4)
+#define ROT_CONTROL_START (1 << 0)
+
+/* Status */
+#define ROT_STATUS 0x20
+#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
+#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE 1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
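
A hedged sketch of classifying and acknowledging a rotator interrupt with these definitions (write-one-to-clear acknowledge is an assumption inferred from the PENDING helper; the function name and `regs` are hypothetical, and <linux/io.h> and <linux/errno.h> are assumed):

	static int rot_handle_irq(void __iomem *regs)
	{
		u32 val = ROT_STATUS_IRQ(readl(regs + ROT_STATUS));

		/* ack: write the matching pending bit back, assumed W1C */
		writel(ROT_STATUS_IRQ_PENDING(val), regs + ROT_STATUS);

		return val == ROT_STATUS_IRQ_VAL_COMPLETE ? 0 : -EIO;
	}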
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE 0x3c
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS 0x40
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE 0x44
+#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask) ((max) & (mask))
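
For a grid of four, align = 2 and mask = ~0x3: ROT_ALIGN(13, 2, ~0x3) = (13 + 2) & ~0x3 = 12 while ROT_ALIGN(14, 2, ~0x3) = 16, i.e. round-to-nearest rather than round-up; ROT_MIN then rounds a lower limit up onto the grid ((13 + 3) & ~0x3 = 16) and ROT_MAX truncates an upper limit down (13 & ~0x3 = 12).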
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b65..23e14e93991f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
/* Cedarview */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf320..51044cc55cf2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..e223b500022e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property, &curValue))
return -1;
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
return -1;
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5a..d81dbc3368f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curValue))
return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 637dd84785e2..2d4ab48f07a2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector, property, &val))
+ if (drm_object_property_get_value(&connector->base, property, &val))
goto set_prop_error;
if (val == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
goto set_prop_error;
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector, property,
+ if (drm_object_property_set_value(&connector->base, property,
value))
goto set_prop_error;
else
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*attach properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c6..74485dc43945 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 4ec2962f7635..3071526bc3c1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -351,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
(mode->crtc_vdisplay - 1));
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55ec..325013a9c48c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
return;
}
- drm_connector_property_get_value(
- connector,
+ drm_object_property_get_value(
+ &connector->base,
dev->mode_config.scaling_mode_property,
&v);
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e3..9fa5fa2e6192 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curval))
goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (curval == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705dbf..a4cc777ab7a6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
temp_value = val;
if (psb_intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->right, val);
if (psb_intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->left, val);
if (psb_intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->bottom, val);
if (psb_intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (psb_intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->top, val);
if (psb_intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
psb_intel_sdvo_connector->tv_format, 0);
return true;
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e3..b865d0728e28 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
- drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
- drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
priv->subconnector);
- drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
priv->hmargin);
- drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_connector_attach_property(connector, conf->tv_mode_property,
+ drm_object_attach_property(&connector->base, conf->tv_mode_property,
priv->norm);
- drm_connector_attach_property(connector, conf->tv_brightness_property,
+ drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
- drm_connector_attach_property(connector, conf->tv_contrast_property,
+ drm_object_attach_property(&connector->base, conf->tv_contrast_property,
priv->contrast);
- drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
priv->flicker);
- drm_connector_attach_property(connector, priv->scale_property,
+ drm_object_attach_property(&connector->base, priv->scale_property,
priv->scale);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 87e9b92039df..55ffba1f5818 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume %dbpp panel color "
- "depth.\n",
- dev_priv->edp.bpp);
- }
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
@@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
- /* eDP data */
- dev_priv->edp.bpp = 18;
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index fe20bf7e8d24..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
int old_dpms;
/* PCH platforms and VLV only support on/off. */
- if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 82267b27b8f6..5d127e068950 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4118,6 +4118,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc && edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. (Note: >12bpc will be caught below.)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d76258dcb8f8..1b63d55318a0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2569,8 +2569,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
- drm_connector_attach_property(
- connector,
+ drm_object_attach_property(
+ &connector->base,
connector->dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 42839fc73499..496caa73eb70 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2379,15 +2379,9 @@ int intel_enable_rc6(const struct drm_device *dev)
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable rc6 on ilk if VT-d is on. */
- if (intel_iommu_gfx_mapped)
- return false;
-#endif
- DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
- return INTEL_RC6_ENABLE;
- }
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
if (IS_HASWELL(dev)) {
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0e03985b0fe4..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2251,7 +2251,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
@@ -2282,7 +2281,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2325,8 +2323,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.cloneable = true;
-
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
return true;
@@ -2357,9 +2353,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
- intel_sdvo->base.cloneable = false;
-
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
@@ -2432,6 +2425,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
return true;
}
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
@@ -2753,9 +2758,20 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
- goto err;
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
}
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
@@ -2766,12 +2782,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err;
+ goto err_output;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err;
+ goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2791,6 +2807,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 49d60a620122..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
static int mgag200_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
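All three converted call sites follow the same pin/unpin recipe: toggle TTM_PL_FLAG_NO_EVICT on every placement, then revalidate with the slimmed-down ttm_bo_validate() that no longer takes a no_wait_reserve argument. Condensed into one hedged sketch of what the driver does (the helper name is invented, not a function in the driver):

	static int mga_bo_set_pinned(struct mgag200_bo *bo, bool pin)
	{
		int i;

		for (i = 0; i < bo->placement.num_placement; i++) {
			if (pin)
				bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
			else
				bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
		}

		/* interruptible = false, no_wait_gpu = false */
		return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	}
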
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c04..ab25752a0b1e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/conn.o
nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
nouveau-y += core/subdev/bios/dp.o
nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
nouveau-y += core/subdev/fb/nv50.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
nouveau-y += core/engine/dmaobj/nv04.o
nouveau-y += core/engine/dmaobj/nv50.o
nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
nouveau-y += core/engine/software/nv04.o
nouveau-y += core/engine/software/nv10.o
nouveau-y += core/engine/software/nv50.o
nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
# drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
# drm/pm
nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb59..84c71fad2b6c 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
return nouveau_gpuobj_fini(&engctx->base, suspend);
}
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_engctx *engctx;
+ int ret;
+
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ *pobject = nv_object(engctx);
+ return ret;
+}
+
void
_nouveau_engctx_dtor(struct nouveau_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 000000000000..6b0843c33877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_falcon *falcon = (void *)object;
+ const struct firmware *fw;
+ char name[32] = "internal";
+ int ret, i;
+ u32 caps;
+
+ /* enable engine, and determine its capabilities */
+ ret = nouveau_engine_init(&falcon->base);
+ if (ret)
+ return ret;
+
+ if (device->chipset < 0xa3 ||
+ device->chipset == 0xaa || device->chipset == 0xac) {
+ falcon->version = 0;
+ falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
+ } else {
+ caps = nv_ro32(falcon, 0x12c);
+ falcon->version = (caps & 0x0000000f);
+ falcon->secret = (caps & 0x00000030) >> 4;
+ }
+
+ caps = nv_ro32(falcon, 0x108);
+ falcon->code.limit = (caps & 0x000001ff) << 8;
+ falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+ nv_debug(falcon, "falcon version: %d\n", falcon->version);
+ nv_debug(falcon, "secret level: %d\n", falcon->secret);
+ nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+ nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+ /* wait for 'uc halted' to be signalled before continuing */
+ if (falcon->secret) {
+ nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+ nv_wo32(falcon, 0x004, 0x00000010);
+ }
+
+ /* disable all interrupts */
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ /* no default ucode provided by the engine implementation, try to
+ * locate a "self-bootstrapping" firmware image for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret == 0) {
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ falcon->data.data = NULL;
+ falcon->data.size = 0;
+ release_firmware(fw);
+ }
+
+ falcon->external = true;
+ }
+
+ /* next step is to try and load "static code/data segment" firmware
+ * images for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware data\n");
+ return ret;
+ }
+
+ falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->data.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->data.data)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware code\n");
+ return ret;
+ }
+
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->code.data)
+ return -ENOMEM;
+ }
+
+ nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+ "static code/data segments" : "self-bootstrapping");
+
+ /* ensure any "self-bootstrapping" firmware image is in vram */
+ if (!falcon->data.data && !falcon->core) {
+ ret = nouveau_gpuobj_new(object->parent, NULL,
+ falcon->code.size, 256, 0,
+ &falcon->core);
+ if (ret) {
+ nv_error(falcon, "core allocation failed, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < falcon->code.size; i += 4)
+ nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+ }
+
+ /* upload firmware bootloader (or the full code segments) */
+ if (falcon->core) {
+ if (device->card_type < NV_C0)
+ nv_wo32(falcon, 0x618, 0x04000000);
+ else
+ nv_wo32(falcon, 0x618, 0x00000114);
+ nv_wo32(falcon, 0x11c, 0);
+ nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+ nv_wo32(falcon, 0x114, 0);
+ nv_wo32(falcon, 0x118, 0x00006610);
+ } else {
+ if (falcon->code.size > falcon->code.limit ||
+ falcon->data.size > falcon->data.limit) {
+ nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+ return -EINVAL;
+ }
+
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00100000);
+ for (i = 0; i < falcon->code.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+ } else {
+ nv_wo32(falcon, 0x180, 0x01000000);
+ for (i = 0; i < falcon->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wo32(falcon, 0x188, i >> 6);
+ nv_wo32(falcon, 0x184, falcon->code.data[i]);
+ }
+ }
+ }
+
+ /* upload data segment (if necessary), zeroing the remainder */
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+ for (; i < falcon->data.limit; i += 4)
+ nv_wo32(falcon, 0xff4, 0x00000000);
+ } else {
+ nv_wo32(falcon, 0x1c0, 0x01000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+ for (; i < falcon->data.limit / 4; i++)
+ nv_wo32(falcon, 0x1c4, 0x00000000);
+ }
+
+ /* start it running */
+ nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+ nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+ nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+ nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+ return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+
+ if (!suspend) {
+ nouveau_gpuobj_ref(NULL, &falcon->core);
+ if (falcon->external) {
+ kfree(falcon->data.data);
+ kfree(falcon->code.data);
+ falcon->code.data = NULL;
+ }
+ }
+
+ nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 addr, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
+{
+ struct nouveau_falcon *falcon;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+ fname, length, pobject);
+ falcon = *pobject;
+ if (ret)
+ return ret;
+
+ falcon->addr = addr;
+ return 0;
+}
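_nouveau_falcon_init() derives its firmware file names from the chipset and the engine's MMIO base: first a single self-bootstrapping image, then split data/code images tagged 'd' and 'c'. For a chipset 0xc0 engine at 0x084000 (the PBSP case added later in this series) the scheme works out as below; this is a standalone illustration with sample values, not driver code:

	#include <stdio.h>

	int main(void)
	{
		unsigned chipset = 0xc0;	/* sample values */
		unsigned addr = 0x084000;
		char name[32];

		/* same format string as _nouveau_falcon_init() */
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 chipset, addr >> 12);
		printf("boot image: %s\n", name);	/* nouveau/nvc0_fuc084 */
		printf("split:      %sd + %sc\n", name, name);
		return 0;
	}
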
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69cf..560b2214cf1c 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
}
u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
}
void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f7..0261a11b2ae0 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
int
nouveau_mm_fini(struct nouveau_mm *mm)
{
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
- int nodes = 0;
+ if (nouveau_mm_initialised(mm)) {
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (WARN_ON(nodes++ == mm->heap_nodes))
+ return -EBUSY;
+ }
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (WARN_ON(nodes++ == mm->heap_nodes))
- return -EBUSY;
+ kfree(heap);
}
- kfree(heap);
return 0;
}
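Wrapping the body in nouveau_mm_initialised() makes the fini path safe on an mm that was never set up: previously list_first_entry() on an empty node list would have handed kfree() a bogus 'heap' pointer. The shape being adopted, sketched with hypothetical names:

	/* Idempotent fini: tolerate never-initialised state, refuse to
	 * tear down live state. The 'thing_*' names are illustrative.
	 */
	static int thing_fini(struct thing *t)
	{
		if (!thing_initialised(t))
			return 0;		/* nothing was ever allocated */

		if (thing_has_live_nodes(t))
			return -EBUSY;		/* users still hold allocations */

		kfree(t->heap);
		return 0;
	}
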
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907ee..1d9f614cb97d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/bsp.h>
struct nv84_bsp_priv {
- struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
- struct nouveau_bsp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
* BSP context
******************************************************************************/
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_bsp_chan *priv;
- int ret;
-
- ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_bsp_context_ctor,
- .dtor = nv84_bsp_context_dtor,
- .init = nv84_bsp_context_init,
- .fini = nv84_bsp_context_fini,
- .rd32 = _nouveau_bsp_context_rd32,
- .wr32 = _nouveau_bsp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
* BSP engine/subdev functions
******************************************************************************/
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_bsp_priv *priv;
int ret;
- ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
- nv_subdev(priv)->intr = nv84_bsp_intr;
nv_engine(priv)->cclass = &nv84_bsp_cclass;
nv_engine(priv)->sclass = nv84_bsp_sclass;
return 0;
}
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- return nouveau_bsp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_ctor,
- .dtor = nv84_bsp_dtor,
- .init = nv84_bsp_init,
- .fini = nv84_bsp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 000000000000..0a5aa6bb0870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+ { 0x90b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+ struct nvc0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+ nv_engine(priv)->sclass = nvc0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 000000000000..d4f23bbd75b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+ { 0x95b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+ struct nve0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nve0_bsp_cclass;
+ nv_engine(priv)->sclass = nve0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af740..283248c7b050 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
@@ -36,11 +35,7 @@
#include "fuc/nva3.fuc.h"
struct nva3_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nva3_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nva3_copy_cclass = {
.handle = NV_ENGCTX(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
{}
};
-static void
+void
nva3_copy_intr(struct nouveau_subdev *subdev)
{
struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_falcon *falcon = (void *)subdev;
struct nouveau_object *engctx;
- struct nva3_copy_priv *priv = (void *)subdev;
- u32 dispatch = nv_rd32(priv, 0x10401c);
- u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
- u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
- u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040) >> 16;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+ u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+ u32 addr = nv_ro32(falcon, 0x040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044);
+ u32 data = nv_ro32(falcon, 0x044);
int chid;
engctx = nouveau_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
+ nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004, 0x00000040);
+ nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004, stat);
+ nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+ nv_wo32(falcon, 0x004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_copy_priv *priv;
int ret;
- ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nva3_copy_cclass;
nv_engine(priv)->sclass = nva3_copy_sclass;
nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+ nv_falcon(priv)->code.data = nva3_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+ nv_falcon(priv)->data.data = nva3_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
return 0;
}
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
- struct nva3_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188, i >> 6);
- nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x10410c, 0x00000000);
- nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
- struct nva3_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nva3_copy_oclass = {
.handle = NV_ENGINE(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = nva3_copy_init,
- .fini = nva3_copy_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a8791055..b3ed2737e21f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <engine/fifo.h>
#include <engine/copy.h>
@@ -33,11 +32,7 @@
#include "fuc/nvc0.fuc.h"
struct nvc0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
- .ctor = nvc0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
};
static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
* PCOPY engine/subdev functions
******************************************************************************/
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
- { 0x0001, "ILLEGAL_MTHD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "INVALID_BITFIELD" },
- {}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)subdev;
- u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
- u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
- u32 stat = intr & disp & ~(disp >> 16);
- u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
- u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
- int chid;
-
- engctx = nouveau_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
- nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
- stat &= ~0x00000040;
- }
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret;
- if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
- }
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
- nouveau_engctx_put(engctx);
+ nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+ return 0;
}
static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy0_cclass;
nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy1_cclass;
nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
- nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
- nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
- nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nvc0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac5..dbbe9e8998fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
#include <engine/copy.h>
struct nve0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nve0_copy_context_ofuncs = {
- .ctor = nve0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
};
static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca0..b97490512723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
#include <engine/crypt.h>
struct nv84_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv84_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
struct nv84_crypt_priv *priv = (void *)object;
int ret;
- ret = nouveau_crypt_init(&priv->base);
+ ret = nouveau_engine_init(&priv->base);
if (ret)
return ret;
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
+ .dtor = _nouveau_engine_dtor,
.init = nv84_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b96..21986f3bf0c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/falcon.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
@@ -36,11 +37,7 @@
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv98_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x087004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nv98_crypt_cclass;
nv_engine(priv)->sclass = nv98_crypt_sclass;
nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
- return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
- struct nv98_crypt_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_crypt_init(&priv->base);
- if (ret)
- return ret;
-
- /* wait for exit interrupt to signal */
- nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
- nv_wr32(priv, 0x087004, 0x00000010);
-
- /* upload microcode code and data segments */
- nv_wr32(priv, 0x087ff8, 0x00100000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
- nv_wr32(priv, 0x087ff8, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
- /* start it running */
- nv_wr32(priv, 0x08710c, 0x00000000);
- nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+ nv_falcon(priv)->code.data = nv98_pcrypt_code;
+ nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+ nv_falcon(priv)->data.data = nv98_pcrypt_data;
+ nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
return 0;
}
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
- .init = nv98_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 000000000000..d0817d94454c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+ (data & NV50_DISP_DAC_PWR_VSYNC) |
+ (data & NV50_DISP_DAC_PWR_DATA) |
+ (data & NV50_DISP_DAC_PWR_STATE);
+ const u32 doff = (or * 0x800);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+ const u32 doff = (or * 0x800);
+ int load = -EINVAL;
+ nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+ udelay(9500);
+ nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+ load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+ nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_DAC_PWR:
+ ret = priv->dac.power(priv, or, data[0]);
+ break;
+ case NV50_DISP_DAC_LOAD:
+ ret = priv->dac.sense(priv, or, data[0]);
+ if (ret >= 0) {
+ data[0] = ret;
+ ret = 0;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
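nv50_dac_sense() programs the caller's test load into 0x61a00c, waits ~9.5ms for the sense lines to settle, then returns the 3-bit result from bits 29:27 of the readback; callers presumably treat a non-zero value as an attached analog load (that interpretation is ours, the patch doesn't spell it out). Decoding the readback, standalone:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t rd = 0x38000000;		/* made-up readback */
		int load = (rd & 0x38000000) >> 27;	/* bits 29:27, as above */

		printf("load lines: %d\n", load);	/* 7 = all three lines */
		return 0;
	}
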
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 000000000000..373dbcc523b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x800);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
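nva3_hda_eld() streams the ELD to the codec one byte per 32-bit write: the byte index lands in bits 15:8 of the 0x61c440 word and the payload byte in bits 7:0, after which 0x61c448 flips what are presumably the enable/valid bits (0x80000003). The packing, as a standalone sketch with made-up ELD bytes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t eld[4] = { 0x10, 0x2c, 0x00, 0x5d };	/* hypothetical */
		unsigned i;

		for (i = 0; i < sizeof(eld); i++) {
			unsigned word = (i << 8) | eld[i];
			printf("wr 0x61c440 <- 0x%08x\n", word);
		}
		return 0;
	}
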
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 000000000000..dc57e24fc1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x030);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 000000000000..0d36bdc51417
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
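+ /* the header word below packs length << 16 | version << 8 | type:
+ * 0x000d0282 is a 13-byte version-2 AVI InfoFrame (type 0x82), and
+ * 0x000a0184 further down is a 10-byte version-1 Audio InfoFrame
+ * (type 0x84), matching the CEA-861 InfoFrame definitions
+ */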
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+ nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+ nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+ nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 000000000000..f065fc248adf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 soff = (or * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+ nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+ nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+ nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+ nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 000000000000..5151bb261832
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+ nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+ nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+ /* unknown purpose, but audio doesn't work without it.. */
+ nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 16a9afb1060b..0f09af135415 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,21 +22,744 @@
* Authors: Ben Skeggs
*/
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
+
#include <engine/software.h>
#include <engine/disp.h>
-struct nv50_disp_priv {
- struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_base *base = (void *)parent;
+ struct nv50_disp_chan *chan;
+ int ret;
+
+ if (base->chan & (1 << chid))
+ return -EBUSY;
+ base->chan |= (1 << chid);
+
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ (1ULL << NVDEV_ENGINE_DMAOBJ),
+ length, pobject);
+ chan = *pobject;
+ if (ret)
+ return ret;
+
+ chan->chid = chid;
+ return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+ base->chan &= ~(1 << chan->chid);
+ nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 chid = chan->chid;
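+ /* hash table entry: channel id packed into bits 31:28 and the low
+ * bits, with the object's instance offset in bits 27:10 -- layout
+ * inferred from the expression below
+ */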
+ u32 data = (chid << 28) | (addr << 10) | chid;
+ return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+ dmac = *pobject;
+ if (ret)
+ return ret;
+
+ dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!dmac->pushdma)
+ return -ENOENT;
+
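+ /* the push buffer must be a 4KiB DMA object of one of the classic
+ * memory classes (0x0002 NV_DMA_FROM_MEMORY / 0x003d NV_DMA_IN_MEMORY),
+ * targeting VRAM or non-snooped PCI; anything else is rejected
+ */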
+ switch (nv_mclass(dmac->pushdma)) {
+ case 0x0002:
+ case 0x003d:
+ if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+ return -EINVAL;
+
+ switch (dmac->pushdma->target) {
+ case NV_MEM_TARGET_VRAM:
+ dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_dmac *dmac = (void *)object;
+ nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+ nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+ nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+ if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+ nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204, mast->push);
+ nv_wr32(priv, 0x610208, 0x00010000);
+ nv_wr32(priv, 0x61020c, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+ .ctor = nv50_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_mast_init,
+ .fini = nv50_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+ .ctor = nv50_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 3 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+ .ctor = nv50_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+ nv_error(pioc, "timeout0: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "timeout1: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+ .ctor = nv50_disp_oimm_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+ .ctor = nv50_disp_curs_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The segments of code below, which copy values from one register to
+ * another, appear to inform EVO of the display capabilities or
+ * something similar. It's not known what the 0x614004 caps are for..
+ */
+ tmp = nv_rd32(priv, 0x614004);
+ nv_wr32(priv, 0x610184, tmp);
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+ nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+ }
+
+ /* ... EXT caps */
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+ nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x610024) & 0x00000100) {
+ nv_wr32(priv, 0x610024, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x61002c, 0x00000370);
+ nv_wr32(priv, 0x610028, 0x00000000);
+ return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x610024, 0x00000000);
+ nv_wr32(priv, 0x610020, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
+ .init = nv50_disp_base_init,
+ .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+ { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- {},
+ { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nouveau_engctx *ectx;
+ int ret = -EBUSY;
+
+ /* no context needed for channel objects... */
+ if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ atomic_inc(&parent->refcount);
+ *pobject = parent;
+ return 0;
+ }
+
+ /* allocate display hardware to client */
+ mutex_lock(&nv_subdev(priv)->mutex);
+ if (list_empty(&nv_engine(priv)->contexts)) {
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+ 0x10000, 0x10000,
+ NVOBJ_FLAG_HEAP, &ectx);
+ *pobject = nv_object(ectx);
+ }
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+ .handle = NV_ENGCTX(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_data_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+ u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
+
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
+ continue;
+
+ nv_wr32(priv, 0x610020, 0x00010000 << chid);
+ addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+ nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+ chid, addr & 0xffc, data, addr);
+ }
+}
+
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
+ struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
struct nouveau_software_chan *chan, *temp;
unsigned long flags;
@@ -46,19 +769,25 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
if (chan->vblank.crtc != crtc)
continue;
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
+ if (nv_device(priv)->chipset >= 0xc0) {
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c,
+ upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010,
+ lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
} else {
- if (nv_device(priv)->chipset >= 0xc0) {
- nv_wr32(priv, 0x06000c,
- upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ bar->flush(bar);
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
}
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
}
list_del(&chan->vblank.head);
@@ -71,30 +800,422 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
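+ /* outputs 0-3 are DACs; 4 and up are SORs, where the method-control
+ * value selects the output type and which link(s) a matching DCB
+ * entry must support (LVDS, single/dual-link TMDS, DP on link 1/2)
+ */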
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ u32 ctrl = 0x00000000;
+ int i;
+
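+ /* scan the DAC, then SOR, control registers to find which output
+ * currently owns this head; the SOR register block depends on the
+ * chipset generation
+ */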
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+ i += 3;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+ i += 3;
+ }
+
+ if (!(ctrl & (1 << head)))
+ return false;
+
+ data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+ struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+ i += 3;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+ i += 3;
+ }
+
+ if (!(ctrl & (1 << head)))
+ return 0x0000;
+
+ data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+ if (!data)
+ return 0x0000;
+
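+ /* build the config value used to select an output clock script; for
+ * TMDS, bit 8 appears to request the dual-link script once pclk
+ * exceeds the 165MHz single-link limit
+ */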
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = outp,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
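+/* The supervisor interrupt appears to fire in stages over a modeset:
+ * unk10 around encoder detach, unk20 for the VPLL update and OR
+ * (re)attach, unk40 for post-attach fixups. The unkXX names reflect
+ * the interrupt bits, not any known hardware naming.
+ */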
+static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000060) >> 5) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 1);
+
+ nv_wr32(priv, 0x610024, 0x00000010);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
+{
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+ const u32 symbol = 100000;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x614300 + soff);
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, bits, r;
+
+ /* calculate packed data rate for each lane */
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ if (clksor & 0x000c0000)
+ link_bw = 270000;
+ else
+ link_bw = 162000;
+
+ if ((ctrl & 0xf0000) == 0x60000) bits = 30;
+ else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+ else bits = 18;
+
+ link_data_rate = (pclk * bits / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
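+ /* brute-force search, from largest TU down, for a TU size whose
+ * valid-symbol count is exactly (or most closely) representable in
+ * the hardware's integer (VTUi) plus fractional (VTUf/VTUa) encoding
+ */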
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
+
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (!bestTU) {
+ nv_error(priv, "unable to find suitable dp config\n");
+ return;
+ }
+
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+ nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+ struct dcb_output outp;
+ u32 addr, mask, data;
+ int head;
+
+ /* finish detaching encoder? */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 2);
+
+ /* check whether a vpll change is required */
+ head = ffs((super & 0x00000600) >> 9) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+ }
+
+ /* (re)attach the relevant OR to the head */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+ if (conf) {
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0xffffffff;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+ addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0x00000707;
+ data = (conf & 0x0100) ? 0x0101 : 0x0000;
+ }
+
+ nv_mask(priv, addr, mask, data);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000020);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ u8 ver, hdr;
+
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+ if (outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_tmds(priv, &outp);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000040);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+ u32 super = nv_rd32(priv, 0x610030);
+
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
+ if (intr1 & 0x00000010)
+ nv50_disp_intr_unk10(priv, super);
+ if (intr1 & 0x00000020)
+ nv50_disp_intr_unk20(priv, super);
+ if (intr1 & 0x00000040)
+ nv50_disp_intr_unk40(priv, super);
+}
+
+void
nv50_disp_intr(struct nouveau_subdev *subdev)
{
struct nv50_disp_priv *priv = (void *)subdev;
- u32 stat1 = nv_rd32(priv, 0x610024);
+ u32 intr0 = nv_rd32(priv, 0x610020);
+ u32 intr1 = nv_rd32(priv, 0x610024);
- if (stat1 & 0x00000004) {
+ if (intr0 & 0x001f0000) {
+ nv50_disp_intr_error(priv);
+ intr0 &= ~0x001f0000;
+ }
+
+ if (intr1 & 0x00000004) {
nv50_disp_intr_vblank(priv, 0);
nv_wr32(priv, 0x610024, 0x00000004);
- stat1 &= ~0x00000004;
+ intr1 &= ~0x00000004;
}
- if (stat1 & 0x00000008) {
+ if (intr1 & 0x00000008) {
nv50_disp_intr_vblank(priv, 1);
nv_wr32(priv, 0x610024, 0x00000008);
- stat1 &= ~0x00000008;
+ intr1 &= ~0x00000008;
}
+ if (intr1 & 0x00000070) {
+ nv50_disp_intr_super(priv, intr1);
+ intr1 &= ~0x00000070;
+ }
}
static int
nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
@@ -105,8 +1226,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv50_disp_sclass;
+ nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv50_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 000000000000..a6bb931450f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+ struct nouveau_oclass *sclass;
+ struct {
+ int nr;
+ } head;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+ int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ } dac;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+ int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+ int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+ u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+ int lane, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ u32 lvdsconf;
+ } sor;
+};
+
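+/* each *_MTHD(n) macro expands to a first,last method pair, filling the
+ * method-range fields of a nouveau_omthds entry
+ */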
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+struct nv50_disp_base {
+ struct nouveau_parent base;
+ struct nouveau_ramht *ramht;
+ u32 chan;
+};
+
+struct nv50_disp_chan {
+ struct nouveau_namedb base;
+ int chid;
+};
+
+int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a) \
+ nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b) \
+ nouveau_namedb_fini(&(a)->base, (b))
+
+int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+ struct nv50_disp_chan base;
+ struct nouveau_dmaobj *pushdma;
+ u32 push;
+};
+
+struct nv50_disp_pioc {
+ struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 000000000000..fc84eacdfbec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+ { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+ { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv84_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x82),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 000000000000..ba9dfd4669a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+ { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+ { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ {}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv94_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x88),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 000000000000..5d63902cdeda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+ { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+ { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva0_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x83),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 000000000000..e9192ca389fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+ { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+ { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva3_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nva3_hda_eld;
+ priv->sor.hdmi = nva3_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x85),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
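
Relative to nv94, the nva3 method table above adds only NVA3_DISP_SOR_HDA_ELD. Note also that NV94_DISP_SOR_DP_DRVCTL appears four times: the lane index is folded into the method offset, so each lane is a distinct method number, and nv50_sor_mthd() (sornv50.c, later in this series) recovers it with (mthd & 0xc0) >> 6. A hedged sketch of that registration pattern; DRVCTL_BASE and register_method are made-up names, only the 0x40 stride is taken from the decode:

    /* Hypothetical sketch: one table row per lane.  The 0x40 stride
     * matches the (mthd & 0xc0) >> 6 decode in nv50_sor_mthd(); the
     * base offset here is invented. */
    enum { DRVCTL_BASE = 0x0180, DRVCTL_STRIDE = 0x40 };

    static void
    register_drvctl_methods(void (*register_method)(unsigned mthd))
    {
            unsigned lane;
            for (lane = 0; lane < 4; lane++)
                    register_method(DRVCTL_BASE + lane * DRVCTL_STRIDE);
    }
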
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b8..9e38ebff5fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nvd0_disp_priv {
- struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+ return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494, mast->push);
+ nv_wr32(priv, 0x610498, 0x00010000);
+ nv_wr32(priv, 0x61049c, 0x00000001);
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+ .ctor = nvd0_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_mast_init,
+ .fini = nvd0_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+ .ctor = nvd0_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 5 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+ .ctor = nvd0_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* activate channel */
+ nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+ .ctor = nvd0_disp_oimm_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+ .ctor = nvd0_disp_curs_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The register copies below appear to inform EVO of the display
+ * capabilities, or something similar.
+ */
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+ nv_wr32(priv, 0x6100ac, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x610090, 0x00000000);
+ nv_wr32(priv, 0x6100a0, 0x00000000);
+ nv_wr32(priv, 0x6100b0, 0x00000307);
+
+ return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x6100b0, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+ .ctor = nvd0_disp_base_ctor,
+ .dtor = nvd0_disp_base_dtor,
+ .init = nvd0_disp_base_init,
+ .fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- {},
+ { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ dcb->sorconf.link = mask;
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
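+/* The mask built above packs everything dcb_outp_match() needs into one
+ * word: (0x0001 << outp) selects the output, bits 6..7 carry the SOR
+ * link unit(s) folded in by 0x00c0 & (mask << 6), and (0x0100 << head)
+ * selects the head, so a single VBIOS query is keyed on all three.
+ */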
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+ u32 ctrl, int id, u32 pclk)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return 0x0000;
+
+ switch (dcb.type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
+static void
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 1);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+ const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
+ const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+ const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+ const u32 hoff = (head * 0x800);
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x612300 + soff);
+ u32 datarate, link_nr, link_bw, bits;
+ u64 ratio, value;
+
+ if ((conf & 0x3c0) == 0x180) bits = 30;
+ else if ((conf & 0x3c0) == 0x140) bits = 24;
+ else bits = 18;
+ datarate = (pclk * bits) / 8;
+
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ link_bw = (clksor & 0x007c0000) >> 18;
+ link_bw *= 27000;
+
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nv_wr32(priv, 0x616610 + hoff, value);
+}
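+/* Worked example for the arithmetic above: a 148500kHz pixel clock at
+ * 24bpp gives datarate = 148500 * 24 / 8 = 445500.  Four lanes with a
+ * clksor link-bw field of 10 give link_bw = 270000 (10kHz units), so
+ * ratio = 445500 * 100000 / (4 * 270000) = 41250, i.e. ~41% link
+ * occupancy, and value = (100000 - 41250) * 64 * 41250 / 100000
+ * / 100000 = 15, plus the fudge factor 5 = a TU fill of 20.
+ */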
+
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ u32 pclk;
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 2);
+ }
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+ if (pclk && (mask & 0x00010000)) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+ if (mcp & (1 << head)) {
+ if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+ u32 addr, mask, data = 0x00000000;
+ if (i < 4) {
+ addr = 0x612280 + ((i - 0) * 0x800);
+ mask = 0xffffffff;
+ } else {
+ switch (mcp & 0x00000f00) {
+ case 0x00000800:
+ case 0x00000900:
+ nvd0_display_unk2_calc_tu(priv, head, i - 4);
+ break;
+ default:
+ break;
+ }
+
+ addr = 0x612300 + ((i - 4) * 0x800);
+ mask = 0x00000707;
+ if (cfg & 0x00000100)
+ data = 0x00000101;
+ }
+ nv_mask(priv, addr, mask, data);
+ }
+ break;
+ }
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int pclk, i;
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+ if (mcp & (1 << head))
+ exec_clkcmp(priv, head, i, mcp, 1, pclk);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
-static void
+void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
- struct nvd0_disp_priv *priv = (void *)subdev;
+ struct nv50_disp_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x610088);
int i;
- for (i = 0; i < 4; i++) {
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x61008c);
+ nv_wr32(priv, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
+ if (intr & 0x00000002) {
+ u32 stat = nv_rd32(priv, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0) {
+ u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+ nv_wr32(priv, 0x61009c, (1 << chid));
+ nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+ }
+
+ intr &= ~0x00000002;
+ }
+
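+ /* Supervisor interrupt: the three stages arrive as bits 0..2 of
+ * 0x6100ac and are handled in order.  unk1 runs output script 1 for
+ * each output routed to the head, unk2 runs script 2, reprograms the
+ * head's VPLL and executes the id 0 clock-compare scripts, and unk4
+ * executes the id 1 clock-compare scripts.  The affected head is
+ * found by scanning 0x6101d4 per head for a non-zero mask.
+ */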
+ if (intr & 0x00100000) {
+ u32 stat = nv_rd32(priv, 0x6100ac);
+ u32 mask = 0, crtc = ~0;
+
+ while (!mask && ++crtc < priv->head.nr)
+ mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+ if (stat & 0x00000001) {
+ nv_wr32(priv, 0x6100ac, 0x00000001);
+ nvd0_display_unk1_handler(priv, crtc, mask);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ nv_wr32(priv, 0x6100ac, 0x00000002);
+ nvd0_display_unk2_handler(priv, crtc, mask);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000004) {
+ nv_wr32(priv, 0x6100ac, 0x00000004);
+ nvd0_display_unk4_handler(priv, crtc, mask);
+ stat &= ~0x00000004;
+ }
+
+ if (stat) {
+ nv_info(priv, "unknown intr24 0x%08x\n", stat);
+ nv_wr32(priv, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ for (i = 0; i < priv->head.nr; i++) {
u32 mask = 0x01000000 << i;
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nvd0_disp_priv *priv;
+ struct nv50_disp_priv *priv;
int ret;
ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_sclass;
+ nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nvd0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass
nvd0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0xd0),
+ .handle = NV_ENGINE(DISP, 0x90),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvd0_disp_ctor,
.dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 000000000000..259537c4587e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+ { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nve0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x91),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 000000000000..39b6b67732d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+ const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+ const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+ const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
+ const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
+ int ret = -EINVAL;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+ data = *(u32 *)args;
+
+ if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+ return -ENODEV;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_SOR_PWR:
+ ret = priv->sor.power(priv, or, data);
+ break;
+ case NVA3_DISP_SOR_HDA_ELD:
+ ret = priv->sor.hda_eld(priv, or, args, size);
+ break;
+ case NV84_DISP_SOR_HDMI_PWR:
+ ret = priv->sor.hdmi(priv, head, or, data);
+ break;
+ case NV50_DISP_SOR_LVDS_SCRIPT:
+ priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+ ret = 0;
+ break;
+ case NV94_DISP_SOR_DP_TRAIN:
+ switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+ case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+ ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+ ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+ ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+ break;
+ default:
+ break;
+ }
+ break;
+ case NV94_DISP_SOR_DP_LNKCTL:
+ ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_DRVCTL(0):
+ case NV94_DISP_SOR_DP_DRVCTL(1):
+ case NV94_DISP_SOR_DP_DRVCTL(2):
+ case NV94_DISP_SOR_DP_DRVCTL(3):
+ ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+ type, mask, data, &outp);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
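
nv50_sor_mthd() above routes on information packed into the method word itself: the OR in the low bits, the link at bit 2, the head from bit 3, and the connector type from bit 12, with mthd & ~0x3f collapsing the per-OR variants onto one switch case and (mthd & 0xc0) >> 6 recovering the DRVCTL lane. A sketch of the decode follows; the exact NV50_DISP_SOR_MTHD_* mask values live in core/class.h and are not shown in this diff, so the field widths below are assumptions inferred from the shifts:

    /* Assumed field widths; only the shift amounts are taken from the
     * code above. */
    struct sor_mthd_dec {
            unsigned type;    /* (mthd & 0xf000) >> 12: DCB connector type */
            unsigned head;    /* (mthd & 0x0018) >> 3                      */
            unsigned link;    /* (mthd & 0x0004) >> 2                      */
            unsigned or_idx;  /*  mthd & 0x0003                            */
    };

    static struct sor_mthd_dec
    sor_mthd_decode(unsigned mthd)
    {
            struct sor_mthd_dec d;
            d.type   = (mthd & 0xf000) >> 12;
            d.head   = (mthd & 0x0018) >> 3;
            d.link   = (mthd & 0x0004) >> 2;
            d.or_idx =  mthd & 0x0003;
            return d;
    }
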
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 000000000000..f6edd009762e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
+}
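+/* The per-lane drive registers pack four byte-wide fields per word, so
+ * the map above returns each lane's bit shift: nv94_sor_dp_drvctl()
+ * masks 0x000000ff << shift, and nv94_sor_dp_lnkctl() converts the
+ * same value to a lane-enable bit with (shift >> 3).  The 0xaf boards
+ * wire the lanes differently (see the comment above), hence the
+ * second table.
+ */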
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+ init.offset = info.script[2];
+ else
+ init.offset = info.script[3];
+ nvbios_exec(&init);
+
+ init.offset = info.script[0];
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[1],
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ /* -> 10kHz units */
+ link_bw *= 2700;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (link_bw < nv_ro16(bios, info.lnkcmp))
+ info.lnkcmp += 4;
+ init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+ nvbios_exec(&init);
+ }
+
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+ if (link_bw > 16200)
+ clksor |= 0x00040000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ return 0;
+}
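
In nv94_sor_dp_lnkctl() above, the width field of the LNKCTL word is the standard DisplayPort link-bandwidth code (0x06 for 1.62Gbps, 0x0a for 2.7Gbps); multiplying by 2700 yields 10kHz units, 16200 and 27000 respectively, which is why the high-rate clksor bit is keyed on link_bw > 16200. A one-line sketch of the conversion (helper name hypothetical):

    /* Hypothetical helper; the encoding follows the standard DP
     * link-bandwidth codes. */
    static unsigned
    dp_bw_code_to_10khz(unsigned bw_code)
    {
            return bw_code * 2700;   /* 0x06 -> 16200, 0x0a -> 27000 */
    }
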
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 000000000000..c37ce7e29f5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvd0[] = { 16, 8, 0, 24 };
+ return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (nv_ro08(bios, info.lnkcmp) < link_bw)
+ info.lnkcmp += 3;
+ init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+ nvbios_exec(&init);
+ }
+
+ clksor |= link_bw << 18;
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+ return 0;
+}
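
The nvd0 lnkcmp walk above differs from nv94's in layout, not intent: nv94 reads 16-bit entries in 10kHz units with the script pointer at entry + 2, advancing while the requested rate is below the entry, while nvd0 reads byte-wide link-bandwidth codes with the script pointer at entry + 1, advancing while the entry is below the requested code. A side-by-side sketch of the two walks (table layouts inferred from the reads in this diff, not confirmed against VBIOS documentation):

    /* nv94: 16-bit entries, 10kHz units, script pointer at +2 */
    while (link_bw_10khz < nv_ro16(bios, lnkcmp))
            lnkcmp += 4;
    script = nv_ro16(bios, lnkcmp + 2);

    /* nvd0: byte-wide DP bandwidth codes, script pointer at +1 */
    while (nv_ro08(bios, lnkcmp) < bw_code)
            lnkcmp += 3;
    script = nv_ro16(bios, lnkcmp + 1);
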
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d39768..5103e88d1877 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
#include <subdev/fb.h>
#include <engine/dmaobj.h>
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nouveau_dmaobj *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
struct nv_dma_class *args = data;
- struct nouveau_dmaobj *object;
int ret;
if (size < sizeof(*args))
return -EINVAL;
- ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
- object = *pobject;
+ ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+ *pobject = nv_object(dmaobj);
if (ret)
return ret;
switch (args->flags & NV_DMA_TARGET_MASK) {
case NV_DMA_TARGET_VM:
- object->target = NV_MEM_TARGET_VM;
+ dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_TARGET_VRAM:
- object->target = NV_MEM_TARGET_VRAM;
+ dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_TARGET_PCI:
- object->target = NV_MEM_TARGET_PCI;
+ dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_TARGET_PCI_US:
case NV_DMA_TARGET_AGP:
- object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+ dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
switch (args->flags & NV_DMA_ACCESS_MASK) {
case NV_DMA_ACCESS_VM:
- object->access = NV_MEM_ACCESS_VM;
+ dmaobj->access = NV_MEM_ACCESS_VM;
break;
case NV_DMA_ACCESS_RD:
- object->access = NV_MEM_ACCESS_RO;
+ dmaobj->access = NV_MEM_ACCESS_RO;
break;
case NV_DMA_ACCESS_WR:
- object->access = NV_MEM_ACCESS_WO;
+ dmaobj->access = NV_MEM_ACCESS_WO;
break;
case NV_DMA_ACCESS_RDWR:
- object->access = NV_MEM_ACCESS_RW;
+ dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
- object->start = args->start;
- object->limit = args->limit;
- return 0;
+ dmaobj->start = args->start;
+ dmaobj->limit = args->limit;
+ dmaobj->conf0 = args->conf0;
+
+ switch (nv_mclass(parent)) {
+ case NV_DEVICE_CLASS:
+ /* delayed, or no, binding */
+ break;
+ default:
+ ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+ if (ret == 0) {
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ }
+ break;
+ }
+
+ return ret;
}
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+ .ctor = nouveau_dmaobj_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+ { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ {}
+};
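
With the constructor consolidated here, the chipset backends below only supply a bind() hook; creation and, for any parent other than NV_DEVICE_CLASS, immediate binding to a gpuobj are handled once. A hedged sketch of the argument block a client fills in, based on the fields the ctor reads (field types assumed; the creation call itself is omitted):

    struct nv_dma_class args = {
            .flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR,
            .start = 0x0000000000000000ULL,
            .limit = 0x000000000fffffffULL,
            .conf0 = 0,  /* no NV50_DMA_CONF0_ENABLE bit set, so
                          * nv50_dmaobj_bind() picks defaults for
                          * the target */
    };
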
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f31994..027d8217c0fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv04_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
u32 length = dmaobj->limit - dmaobj->start;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV03_CHANNEL_DMA_CLASS:
+ case NV10_CHANNEL_DMA_CLASS:
+ case NV17_CHANNEL_DMA_CLASS:
+ case NV40_CHANNEL_DMA_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
if (dmaobj->target == NV_MEM_TARGET_VM) {
if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
}
static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv04_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV03_CHANNEL_DMA_CLASS:
- case NV10_CHANNEL_DMA_CLASS:
- case NV17_CHANNEL_DMA_CLASS:
- case NV40_CHANNEL_DMA_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
- .ctor = nv04_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
- { 0x0002, &nv04_dmaobj_ofuncs },
- { 0x0003, &nv04_dmaobj_ofuncs },
- { 0x003d, &nv04_dmaobj_ofuncs },
- {}
-};
-
-static int
nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv04_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv04_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e289..750183f7c057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv50_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags = nv_mclass(dmaobj);
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV50_CHANNEL_DMA_CLASS:
+ case NV84_CHANNEL_DMA_CLASS:
+ case NV50_CHANNEL_IND_CLASS:
+ case NV84_CHANNEL_IND_CLASS:
+ case NV50_DISP_MAST_CLASS:
+ case NV84_DISP_MAST_CLASS:
+ case NV94_DISP_MAST_CLASS:
+ case NVA0_DISP_MAST_CLASS:
+ case NVA3_DISP_MAST_CLASS:
+ case NV50_DISP_SYNC_CLASS:
+ case NV84_DISP_SYNC_CLASS:
+ case NV94_DISP_SYNC_CLASS:
+ case NVA0_DISP_SYNC_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NV50_DISP_OVLY_CLASS:
+ case NV84_DISP_OVLY_CLASS:
+ case NV94_DISP_OVLY_CLASS:
+ case NVA0_DISP_OVLY_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
switch (dmaobj->target) {
case NV_MEM_TARGET_VM:
- flags |= 0x00000000;
- flags |= 0x60000000; /* COMPRESSION_USEVM */
- flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+ flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags |= 0x00010000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags |= 0x00020000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags |= 0x00030000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00030000;
break;
default:
return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags |= 0x00040000;
+ flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags |= 0x00080000;
+ flags0 |= 0x00080000;
break;
}
ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags);
+ nv_wo32(*pgpuobj, 0x00, flags0);
nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
upper_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
}
return ret;
}
static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv50_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV50_CHANNEL_DMA_CLASS:
- case NV84_CHANNEL_DMA_CLASS:
- case NV50_CHANNEL_IND_CLASS:
- case NV84_CHANNEL_IND_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
- .ctor = nv50_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
- { 0x0002, &nv50_dmaobj_ofuncs },
- { 0x0003, &nv50_dmaobj_ofuncs },
- { 0x003d, &nv50_dmaobj_ofuncs },
- {}
-};
-
-static int
nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv50_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv50_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa08695535..cd3970d03b80 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
* Authors: Ben Skeggs
*/
+#include <core/device.h>
#include <core/gpuobj.h>
+#include <core/class.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nvc0_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
{
- struct nvc0_dmaobj_priv *dmaobj;
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
- ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVA3_DISP_MAST_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= 0x00020000;
+ }
+ }
- if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VM:
+ flags0 |= 0x00000000;
+ break;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
return -EINVAL;
+ }
- return 0;
-}
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ flags0 |= 0x00080000;
+ break;
+ }
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
- .ctor = nvc0_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+ upper_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
+ }
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
- { 0x0002, &nvc0_dmaobj_ofuncs },
- { 0x0003, &nvc0_dmaobj_ofuncs },
- { 0x003d, &nvc0_dmaobj_ofuncs },
- {}
-};
+ return ret;
+}
static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nvc0_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvc0_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 000000000000..d1528752980c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ u32 flags0 = 0x00000000;
+ int ret;
+
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVD0_DISP_MAST_CLASS:
+ case NVD0_DISP_SYNC_CLASS:
+ case NVD0_DISP_OVLY_CLASS:
+ case NVE0_DISP_MAST_CLASS:
+ case NVE0_DISP_SYNC_CLASS:
+ case NVE0_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+ } else {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00000009;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+ nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+ nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ }
+
+ return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvd0_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
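
nvd0_dmaobj_bind() above packs the CONF0 TYPE and PAGE fields into the first object word with fixed shifts (TYPE << 20, PAGE >> 4), after choosing VM or linear defaults when the user left CONF0 unset. A compilable sketch of that packing style; the mask values below are invented for illustration, since the real NVD0_DMA_CONF0_* definitions live in core/class.h and are not part of this diff:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, NOT the real NVD0_DMA_CONF0_* values. */
#define DEMO_CONF0_TYPE   0x00000030	/* assumed 2-bit field */
#define DEMO_CONF0_PAGE   0x00000f00	/* assumed 4-bit field */

int main(void)
{
	uint32_t conf0  = 0x00000320;	/* arbitrary example value */
	uint32_t flags0 = 0;

	flags0 |= (conf0 & DEMO_CONF0_TYPE) << 20;	/* mirrors nvd0's << 20 */
	flags0 |= (conf0 & DEMO_CONF0_PAGE) >> 4;	/* mirrors nvd0's >> 4 */

	printf("flags0 = 0x%08x\n", flags0);
	return 0;
}
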
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2ae..c2b9db335816 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
#include <core/object.h>
#include <core/handle.h>
+#include <core/class.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int bar, u32 addr, u32 size, u32 pushbuf,
- u32 engmask, int len, void **ptr)
+ u64 engmask, int len, void **ptr)
{
struct nouveau_device *device = nv_device(engine);
struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
- case 0x0002:
- case 0x003d:
+ case NV_DMA_FROM_MEMORY_CLASS:
+ case NV_DMA_IN_MEMORY_CLASS:
break;
default:
return -EINVAL;
}
- if (dmaeng->bind) {
- ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
- if (ret)
- return ret;
- }
+ ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ if (ret)
+ return ret;
/* find a free fifo channel */
spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
}
u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_fifo_chan *chan = (void *)object;
return ioread32_native(chan->user + addr);
}
void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_fifo_chan *chan = (void *)object;
iowrite32_native(data, chan->user + addr);
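
The base.c changes widen the channel engmask from u32 to u64 (and the channel rd32/wr32 offsets likewise), and make the dmaeng->bind() call unconditional now that every generation provides one. The mask widening is what the 1ULL shifts in all the per-chipset ctors below depend on: with a plain int, engine indices of 31 and up shift into (or past) the sign bit. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int engn = 31;	/* first engine index a 32-bit int mask mishandles */

	/* 1 << 31 overflows signed int (formally undefined behaviour; on
	 * common ABIs it produces INT_MIN, which then sign-extends). */
	uint64_t narrow = (uint64_t)(1 << engn);
	uint64_t wide   = 1ULL << engn;	/* the fix used throughout below */

	printf("1    << 31 -> 0x%016llx\n", (unsigned long long)narrow);
	printf("1ULL << 31 -> 0x%016llx\n", (unsigned long long)wide);
	return 0;
}
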
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c2..a47a8548f9e0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ nv_error(priv, "CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
u32 ib_get = nv_rd32(priv, 0x003334);
u32 ib_put = nv_rd32(priv, 0x003330);
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x003334, ib_put);
}
} else {
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
if (device->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_trap(nouveau_fb(priv), 1);
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
}
if (status) {
- nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
status, chid);
nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status) {
- nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_error(priv, "still angry after %d spins, halt\n", cnt);
nv_wr32(priv, 0x002140, 0);
nv_wr32(priv, 0x000140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89d..2c927c1d173b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b1..a9cb51d38c57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
&chan);
*pobject = nv_object(chan);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b86..2b1f91721225 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x1000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00d..bd096364f680 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
/* HW bug workaround:
*
* PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
ret = -EBUSY;
}
-
nv_wr32(priv, 0x00b860, me);
+
+ if (ret == 0) {
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ }
+
return ret;
}
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f166..1eb1c512f503 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
return -EBUSY;
}
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
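
The nv50 and nv84 detach hunks (and the matching nvc0/nve0 ones later) are the same fix: kick the channel off the engine first, and zero the engine-context words only once the kick has succeeded, so a channel that refuses to die never runs with a scrubbed context. Reduced to a hedged standalone sketch, with kick_channel() and clear_engine_ctx() as stand-ins rather than driver functions:

#include <stdbool.h>
#include <errno.h>

static bool kick_channel(void) { return true; }	/* stand-in */
static void clear_engine_ctx(void) { }		/* stand-in */

static int detach(bool suspend)
{
	if (!kick_channel()) {
		if (suspend)
			return -EBUSY;	/* context left intact, retry later */
	}
	/* The old code cleared the context before the kick; now this is
	 * only reached once the channel is known to be off the engine
	 * (or we carry on regardless outside of suspend). */
	clear_engine_ctx();
	return 0;
}

int main(void) { return detach(true); }
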
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be600557..b4365dde1859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
args->pushbuf,
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_COPY1), &chan);
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_COPY1) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_PPP), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
- nv_info(priv, "unknown status 0x00000100\n");
+ nv_warn(priv, "unknown status 0x00000100\n");
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafbc..c930da99c2c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
- int subdev;
- u32 mask;
+ u64 subdev;
+ u64 mask;
} fifo_engine[] = {
- _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)),
+ _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)),
_(NVDEV_ENGINE_VP , 0),
_(NVDEV_ENGINE_PPP , 0),
_(NVDEV_ENGINE_BSP , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index e45035efb8ca..7bbb1e1b7a8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
});
}
-void
+int
nv40_grctx_init(struct nouveau_device *device, u32 *size)
{
- u32 ctxprog[256], i;
+ u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
struct nouveau_grctx ctx = {
.device = device,
.mode = NOUVEAU_GRCTX_PROG,
.data = ctxprog,
- .ctxprog_max = ARRAY_SIZE(ctxprog)
+ .ctxprog_max = 256,
};
+ if (!ctxprog)
+ return -ENOMEM;
+
nv40_grctx_generate(&ctx);
nv_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
+
+ kfree(ctxprog);
+ return 0;
}
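
This ctxnv40.c hunk moves the 256-entry ctxprog buffer (1 KiB) off the kernel stack onto the heap, which forces the void -> int signature change picked up in nv40.h and nv40_graph_init() below so allocation failure can propagate. A userspace analogue of the before/after, with malloc standing in for kmalloc:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

int grctx_init_style(uint32_t *size)
{
	/* was: u32 ctxprog[256]; -- 1 KiB of on-stack scratch */
	uint32_t *ctxprog = malloc(256 * sizeof(*ctxprog));
	if (!ctxprog)
		return -ENOMEM;	/* callers must now check the return */

	/* ... generate the context program and upload it ... */
	*size = 0;

	free(ctxprog);
	return 0;
}

int main(void)
{
	uint32_t size;
	return grctx_init_style(&size);
}
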
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 618528248457..e30a9c5ff1fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv03_graph_gdi_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_gdi_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_iifc_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_chroma },
- { 0x018c, nv01_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifm_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifm_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_surf3d_omthds[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
{}
};
static struct nouveau_omthds
nv03_graph_ttri_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
{}
};
static struct nouveau_omthds
nv01_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
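
Every nouveau_omthds initializer in these graph/mpeg/software hunks gains a second offset: an entry now names an inclusive [start, end] method range instead of a single method, so one handler can claim a whole span; in this conversion each existing entry simply becomes a degenerate range with start == end. A standalone sketch of range-based dispatch (the field layout is inferred from the initializers, not quoted from a header):

#include <stdint.h>
#include <stdio.h>

struct omthd {
	uint32_t start, end;		/* inclusive method range */
	int (*func)(uint32_t mthd);
};

static int bind_patt(uint32_t mthd) { printf("mthd 0x%04x\n", mthd); return 0; }

static const struct omthd omthds[] = {
	{ 0x0184, 0x0184, bind_patt },	/* degenerate range, as in the diff */
	{}
};

static int dispatch(uint32_t mthd)
{
	const struct omthd *m;
	for (m = omthds; m->func; m++) {
		if (mthd >= m->start && mthd <= m->end)
			return m->func(mthd);
	}
	return -1;	/* unhandled method */
}

int main(void) { return dispatch(0x0184); }
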
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77f..5c0f843ea249 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv17_celcius_omthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+ { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a78..5b20401bf911 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
- if (nv_device(engine)->card_type == NV_20) {
+ if (nv_device(engine)->chipset != 0x34) {
nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
printk(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
printk("\n");
- nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, subc, class, mthd, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 8d0021049ec0..0b36dd3deebd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -156,8 +156,8 @@ nv40_graph_context_ctor(struct nouveau_object *parent,
static int
nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
- struct nv04_graph_chan *chan = (void *)object;
+ struct nv40_graph_priv *priv = (void *)object->engine;
+ struct nv40_graph_chan *chan = (void *)object;
u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
int ret = 0;
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
switch (nv_device(priv)->chipset) {
case 0x40:
- case 0x41: /* guess */
+ case 0x41:
case 0x42:
case 0x43:
- case 0x45: /* guess */
+ case 0x45:
case 0x4e:
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
break;
case 0x44:
case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
+ case 0x4c:
case 0x47:
case 0x49:
case 0x4b:
- case 0x4c:
+ case 0x63:
case 0x67:
- default:
+ case 0x68:
nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
break;
}
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
@@ -346,7 +374,9 @@ nv40_graph_init(struct nouveau_object *object)
return ret;
/* generate and upload context program */
- nv40_grctx_init(nv_device(priv), &priv->size);
+ ret = nv40_grctx_init(nv_device(priv), &priv->size);
+ if (ret)
+ return ret;
/* No context present currently */
nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
index d2ac975afc2e..7da35a4e7970 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -15,7 +15,7 @@ nv44_graph_class(void *priv)
return !(0x0baf & (1 << (device->chipset & 0x0f)));
}
-void nv40_grctx_init(struct nouveau_device *, u32 *size);
+int nv40_grctx_init(struct nouveau_device *, u32 *size);
void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf478..b1c3d835b4c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
return 0;
}
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+ { 0x00000001, "BUSY" }, /* set when any bit is set */
+ { 0x00000002, "DISPATCH" },
+ { 0x00000004, "UNK2" },
+ { 0x00000008, "UNK3" },
+ { 0x00000010, "UNK4" },
+ { 0x00000020, "UNK5" },
+ { 0x00000040, "M2MF" },
+ { 0x00000080, "UNK7" },
+ { 0x00000100, "CTXPROG" },
+ { 0x00000200, "VFETCH" },
+ { 0x00000400, "CCACHE_UNK4" },
+ { 0x00000800, "STRMOUT_GSCHED_UNK5" },
+ { 0x00001000, "UNK14XX" },
+ { 0x00002000, "UNK24XX_CSCHED" },
+ { 0x00004000, "UNK1CXX" },
+ { 0x00008000, "CLIPID" },
+ { 0x00010000, "ZCULL" },
+ { 0x00020000, "ENG2D" },
+ { 0x00040000, "UNK34XX" },
+ { 0x00080000, "TPRAST" },
+ { 0x00100000, "TPROP" },
+ { 0x00200000, "TEX" },
+ { 0x00400000, "TPVP" },
+ { 0x00800000, "MP" },
+ { 0x01000000, "ROP" },
+ {}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+ "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+ "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+ "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+ "ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+ const char *const units[], u32 status)
+{
+ int i;
+
+ nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+ for (i = 0; units[i] && status; i++) {
+ if ((status & 7) == 1)
+ pr_cont(" %s", units[i]);
+ status >>= 3;
+ }
+ if (status)
+ pr_cont(" (invalid: 0x%x)", status);
+ pr_cont("\n");
+}
+
static int
nv84_graph_tlb_flush(struct nouveau_engine *engine)
{
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
!(timeout = ptimer->read(ptimer) - start > 2000000000));
if (timeout) {
- nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
- "0x%08x 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
- nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+ nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+ tmp = nv_rd32(priv, 0x400700);
+ nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
+ nouveau_bitfield_print(nv50_pgraph_status, tmp);
+ pr_cont("\n");
+
+ nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+ nv_rd32(priv, 0x400380));
+ nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+ nv_rd32(priv, 0x400384));
+ nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+ nv_rd32(priv, 0x400388));
}
nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
}
if (ustatus) {
if (display)
- nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
nv_wr32(priv, ustatus_addr, 0xc0000000);
}
if (!tps && display)
- nv_info(priv, "%s - No TPs claiming errors?\n", name);
+ nv_warn(priv, "%s - No TPs claiming errors?\n", name);
}
static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
printk("\n");
nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
"mthd 0x%04x data 0x%08x\n",
chid, (u64)inst << 12, subc, class, mthd, data);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
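
The new nouveau_pgraph_vstatus_print() above treats each PGRAPH_VSTATUS register as consecutive 3-bit unit-state fields, printing a unit's name when its field reads 1 and flagging whatever bits remain past the known units as invalid. The same walk as a compilable sketch:

#include <stdio.h>
#include <stdint.h>

static const char *const vstatus_units[] = {
	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX",
	NULL
};

static void vstatus_print(uint32_t status)
{
	int i;

	printf("VSTATUS: 0x%08x", status);
	for (i = 0; vstatus_units[i] && status; i++) {
		if ((status & 7) == 1)	/* the driver singles out state 1 */
			printf(" %s", vstatus_units[i]);
		status >>= 3;		/* advance to the next unit field */
	}
	if (status)
		printf(" (invalid: 0x%x)", status);
	printf("\n");
}

int main(void)
{
	vstatus_print(0x00000009);	/* units 0 and 1 both in state 1 */
	return 0;
}
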
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0a..47a02081d708 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a100, 0x00000002);
nv_wr32(priv, 0x409100, 0x00000002);
if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
- nv_info(priv, "0x409800 wait failed\n");
+ nv_warn(priv, "0x409800 wait failed\n");
nv_wr32(priv, 0x409840, 0xffffffff);
nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cecb..fde8e24415e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i))
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
@@ -261,9 +263,12 @@
#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i))
#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i))
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e7..9fd86375f4c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
static struct nouveau_omthds
nv31_mpeg_omthds[] = {
- { 0x0190, nv31_mpeg_mthd_dma },
- { 0x01a0, nv31_mpeg_mthd_dma },
- { 0x01b0, nv31_mpeg_mthd_dma },
+ { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+ { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+ { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index 12418574efea..f7c581ad1991 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -38,7 +38,7 @@ struct nv40_mpeg_priv {
};
struct nv40_mpeg_chan {
- struct nouveau_mpeg base;
+ struct nouveau_mpeg_chan base;
};
/*******************************************************************************
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d57..bc7d12b30fc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b100, stat);
nv_wr32(priv, 0x00b230, 0x00000001);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da1981..5a5b2a773ed7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/ppp.h>
struct nv98_ppp_priv {
- struct nouveau_ppp base;
+ struct nouveau_engine base;
};
struct nv98_ppp_chan {
- struct nouveau_ppp_chan base;
+ struct nouveau_engctx base;
};
/*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
* PPPP context
******************************************************************************/
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_ppp_chan *priv;
- int ret;
-
- ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_ppp_context_ctor,
- .dtor = nv98_ppp_context_dtor,
- .init = nv98_ppp_context_init,
- .fini = nv98_ppp_context_fini,
- .rd32 = _nouveau_ppp_context_rd32,
- .wr32 = _nouveau_ppp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
* PPPP engine/subdev functions
******************************************************************************/
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_ppp_priv *priv;
int ret;
- ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PPPP", "ppp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00400002;
- nv_subdev(priv)->intr = nv98_ppp_intr;
nv_engine(priv)->cclass = &nv98_ppp_cclass;
nv_engine(priv)->sclass = nv98_ppp_sclass;
return 0;
}
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- return nouveau_ppp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
- .dtor = nv98_ppp_dtor,
- .init = nv98_ppp_init,
- .fini = nv98_ppp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
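
The nv98 PPP rework above deletes ctor/dtor/init/fini wrappers whose bodies only forwarded to the generic helpers, pointing the ofuncs at the shared engctx/engine implementations instead (the new nvc0.c below starts life that way, built on the falcon helpers). The idea in miniature, with illustrative names:

/* Illustrative only: forwarding wrappers that add nothing can be
 * replaced by pointing the vtable at the shared default directly. */
struct ofuncs { int (*init)(void *); };

static int generic_init(void *obj) { (void)obj; return 0; }

/* Before: a per-engine wrapper whose body was just the generic call. */
static int engine_init_wrapper(void *obj) { return generic_init(obj); }

int main(void)
{
	struct ofuncs before = { .init = engine_init_wrapper };
	struct ofuncs after  = { .init = generic_init };	/* what the diff does */
	return before.init(0) | after.init(0);
}
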
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 000000000000..ebf0d860e2dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+ { 0x90b3, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+ .handle = NV_ENGCTX(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+ struct nvc0_ppp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x086010, 0x0000fff2);
+ nv_wr32(priv, 0x08601c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_ppp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+ "PPPP", "ppp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+ nv_engine(priv)->sclass = nvc0_ppp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+ .handle = NV_ENGINE(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_ppp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_ppp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b7..2a859a31c30d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv04_software_omthds[] = {
- { 0x0150, nv04_software_set_ref },
- { 0x0500, nv04_software_flip },
+ { 0x0150, 0x0150, nv04_software_set_ref },
+ { 0x0500, 0x0500, nv04_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb7..a019364b1e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv10_software_omthds[] = {
- { 0x0500, nv10_software_flip },
+ { 0x0500, 0x0500, nv10_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544a..b0e7e1c01ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv50_software_omthds[] = {
- { 0x018c, nv50_software_mthd_dma_vblsem },
- { 0x0400, nv50_software_mthd_vblsem_offset },
- { 0x0404, nv50_software_mthd_vblsem_value },
- { 0x0408, nv50_software_mthd_vblsem_release },
- { 0x0500, nv50_software_mthd_flip },
+ { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+ { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+ { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d6..282a1cd1bc2f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nvc0_software_omthds[] = {
- { 0x0400, nvc0_software_mthd_vblsem_offset },
- { 0x0404, nvc0_software_mthd_vblsem_offset },
- { 0x0408, nvc0_software_mthd_vblsem_value },
- { 0x040c, nvc0_software_mthd_vblsem_release },
- { 0x0500, nvc0_software_mthd_flip },
+ { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+ { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+ { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nvc0_software_mthd_flip },
{}
};
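
All four software engines' method tables change shape here for the same reason: struct nouveau_omthds (see the core/object.h hunk below) now describes a [start, limit] method range rather than a single method, so each existing entry becomes a degenerate one-method range with start == limit. A short sketch of what the new shape enables; the 0x0600..0x06fc range and its handler are hypothetical:

	static struct nouveau_omthds
	example_omthds[] = {
		/* one handler claims every method in [0x0600, 0x06fc] */
		{ 0x0600, 0x06fc, example_mthd_handler },
		/* single-method entries are written start == limit */
		{ 0x0500, 0x0500, nvc0_software_mthd_flip },
		{}
	};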
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e5405..261cd96e6951 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/vp.h>
struct nv84_vp_priv {
- struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
- struct nouveau_vp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
* PVP context
******************************************************************************/
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_vp_chan *priv;
- int ret;
-
- ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_chan *priv = (void *)object;
- return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_vp_context_ctor,
- .dtor = nv84_vp_context_dtor,
- .init = nv84_vp_context_init,
- .fini = nv84_vp_context_fini,
- .rd32 = _nouveau_vp_context_rd32,
- .wr32 = _nouveau_vp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
* PVP engine/subdev functions
******************************************************************************/
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_vp_priv *priv;
int ret;
- ret = nouveau_vp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x01020000;
- nv_subdev(priv)->intr = nv84_vp_intr;
nv_engine(priv)->cclass = &nv84_vp_cclass;
nv_engine(priv)->sclass = nv84_vp_sclass;
return 0;
}
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_priv *priv = (void *)object;
- return nouveau_vp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_vp_oclass = {
.handle = NV_ENGINE(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_vp_ctor,
- .dtor = nv84_vp_dtor,
- .init = nv84_vp_init,
- .fini = nv84_vp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 000000000000..f761949d7039
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+ { 0x90b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+ struct nvc0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nvc0_vp_cclass;
+ nv_engine(priv)->sclass = nvc0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 000000000000..2384ce5dbe16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+ { 0x95b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+ struct nve0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nve0_vp_cclass;
+ nv_engine(priv)->sclass = nve0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800fc..47c4b3a5bd3a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
struct nv_device_class {
u64 device; /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
#define NV_DMA_ACCESS_WR 0x00000200
#define NV_DMA_ACCESS_RDWR 0x00000300
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE 0x80000000
+#define NV50_DMA_CONF0_PRIV 0x00300000
+#define NV50_DMA_CONF0_PRIV_VM 0x00000000
+#define NV50_DMA_CONF0_PRIV_US 0x00100000
+#define NV50_DMA_CONF0_PRIV__S 0x00200000
+#define NV50_DMA_CONF0_PART 0x00030000
+#define NV50_DMA_CONF0_PART_VM 0x00000000
+#define NV50_DMA_CONF0_PART_256 0x00010000
+#define NV50_DMA_CONF0_PART_1KB 0x00020000
+#define NV50_DMA_CONF0_COMP 0x00000180
+#define NV50_DMA_CONF0_COMP_NONE 0x00000000
+#define NV50_DMA_CONF0_COMP_VM 0x00000180
+#define NV50_DMA_CONF0_TYPE 0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE 0x80000000
+#define NVC0_DMA_CONF0_PRIV 0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
+#define NVC0_DMA_CONF0_PRIV_US 0x00100000
+#define NVC0_DMA_CONF0_PRIV__S 0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
+#define NVC0_DMA_CONF0_TYPE 0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE 0x80000000
+#define NVD0_DMA_CONF0_PAGE 0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
+#define NVD0_DMA_CONF0_TYPE 0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
+
struct nv_dma_class {
u32 flags;
u32 pad0;
u64 start;
u64 limit;
+ u32 conf0;
};
/* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS 0x00005070
+#define NV84_DISP_CLASS 0x00008270
+#define NVA0_DISP_CLASS 0x00008370
+#define NV94_DISP_CLASS 0x00008870
+#define NVA3_DISP_CLASS 0x00008570
+#define NVD0_DISP_CLASS 0x00009070
+#define NVE0_DISP_CLASS 0x00009170
+
+#define NV50_DISP_SOR_MTHD 0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
+#define NV50_DISP_SOR_MTHD_LINK 0x00000004
+#define NV50_DISP_SOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_SOR_PWR 0x00010000
+#define NV50_DISP_SOR_PWR_STATE 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
+#define NVA3_DISP_SOR_HDA_ELD 0x00010100
+#define NV84_DISP_SOR_HDMI_PWR 0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN 0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
+
+#define NV50_DISP_DAC_MTHD 0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
+#define NV50_DISP_DAC_MTHD_OR 0x00000003
+
+#define NV50_DISP_DAC_PWR 0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
+#define NV50_DISP_DAC_PWR_DATA 0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
+#define NV50_DISP_DAC_PWR_STATE 0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
+#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS 0x0000507a
+#define NV84_DISP_CURS_CLASS 0x0000827a
+#define NVA0_DISP_CURS_CLASS 0x0000837a
+#define NV94_DISP_CURS_CLASS 0x0000887a
+#define NVA3_DISP_CURS_CLASS 0x0000857a
+#define NVD0_DISP_CURS_CLASS 0x0000907a
+#define NVE0_DISP_CURS_CLASS 0x0000917a
+
+struct nv50_display_curs_class {
+ u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS 0x0000507b
+#define NV84_DISP_OIMM_CLASS 0x0000827b
+#define NVA0_DISP_OIMM_CLASS 0x0000837b
+#define NV94_DISP_OIMM_CLASS 0x0000887b
+#define NVA3_DISP_OIMM_CLASS 0x0000857b
+#define NVD0_DISP_OIMM_CLASS 0x0000907b
+#define NVE0_DISP_OIMM_CLASS 0x0000917b
+
+struct nv50_display_oimm_class {
+ u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS 0x0000507c
+#define NV84_DISP_SYNC_CLASS 0x0000827c
+#define NVA0_DISP_SYNC_CLASS 0x0000837c
+#define NV94_DISP_SYNC_CLASS 0x0000887c
+#define NVA3_DISP_SYNC_CLASS 0x0000857c
+#define NVD0_DISP_SYNC_CLASS 0x0000907c
+#define NVE0_DISP_SYNC_CLASS 0x0000917c
+
+struct nv50_display_sync_class {
+ u32 pushbuf;
+ u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS 0x0000507d
+#define NV84_DISP_MAST_CLASS 0x0000827d
+#define NVA0_DISP_MAST_CLASS 0x0000837d
+#define NV94_DISP_MAST_CLASS 0x0000887d
+#define NVA3_DISP_MAST_CLASS 0x0000857d
+#define NVD0_DISP_MAST_CLASS 0x0000907d
+#define NVE0_DISP_MAST_CLASS 0x0000917d
+
+struct nv50_display_mast_class {
+ u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS 0x0000507e
+#define NV84_DISP_OVLY_CLASS 0x0000827e
+#define NVA0_DISP_OVLY_CLASS 0x0000837e
+#define NV94_DISP_OVLY_CLASS 0x0000887e
+#define NVA3_DISP_OVLY_CLASS 0x0000857e
+#define NVD0_DISP_OVLY_CLASS 0x0000907e
+#define NVE0_DISP_OVLY_CLASS 0x0000917e
+
+struct nv50_display_ovly_class {
+ u32 pushbuf;
+ u32 head;
+};
+
#endif
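
The conf0 word appended to struct nv_dma_class carries the per-generation configuration bits defined above straight through to DMA-object creation. A minimal sketch of a caller filling the class arguments for a linear NV50-class VRAM object; vram_size and the NV_DMA_TARGET_VRAM flag are assumptions (the target defines sit earlier in this header):

	struct nv_dma_class args = {
		.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR,
		.start = 0,
		.limit = vram_size - 1,		/* assumed known */
		.conf0 = NV50_DMA_CONF0_ENABLE |
			 NV50_DMA_CONF0_PART_256 |
			 NV50_DMA_CONF0_TYPE_LINEAR,
	};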
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872eb..2fd48b564c7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
int nouveau_engctx_init(struct nouveau_engctx *);
int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
void _nouveau_engctx_dtor(struct nouveau_object *);
int _nouveau_engctx_init(struct nouveau_object *);
int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 000000000000..1edec386ab36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+ bool external;
+};
+
+struct nouveau_falcon {
+ struct nouveau_engine base;
+
+ u32 addr;
+ u8 version;
+ u8 secret;
+
+ struct nouveau_gpuobj *core;
+ bool external;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } code;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
+ nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p) \
+ nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_init(nv_object(falcon)); \
+})
+#define nouveau_falcon_fini(p,s) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_fini(nv_object(falcon), (s)); \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, bool, const char *,
+ const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int _nouveau_falcon_init(struct nouveau_object *);
+int _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
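
This header is the piece that lets the BSP/VP/PPP/crypt wrappers elsewhere in this diff be deleted wholesale: an engine embeds struct nouveau_falcon, hands its mmio base to nouveau_falcon_create(), and inherits the generic _nouveau_falcon_* ofuncs. The create macro only hides the object-size plumbing; expanding it against the nvc0 PPP constructor earlier in this diff gives:

	/* nouveau_falcon_create(p,e,c,b,d,i,f,r) with r == &priv */
	ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
				    "PPPP", "ppp", &priv);
	/* is, per the macro above, equivalent to: */
	ret = nouveau_falcon_create_(parent, engine, oclass, 0x086000, true,
				     "PPPP", "ppp", sizeof(*priv),
				     (void **)&priv);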
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377ae..b3b9ce4e9d38 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
void _nouveau_gpuobj_dtor(struct nouveau_object *);
int _nouveau_gpuobj_init(struct nouveau_object *);
int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a6..2514e81ade02 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
int heap_nodes;
};
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+ return mm->block_size != 0;
+}
+
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
int nouveau_mm_fini(struct nouveau_mm *);
int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
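
nouveau_mm_initialised() keys off block_size, which nouveau_mm_init() sets non-zero, letting callers make setup and teardown idempotent. A minimal sketch, assuming a subdev that may or may not have brought its allocator up (the function name is illustrative):

	static int
	example_vram_fini(struct nouveau_mm *mm)
	{
		if (!nouveau_mm_initialised(mm))	/* block_size still zero */
			return 0;
		return nouveau_mm_fini(mm);
	}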
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 818feabbf4a0..5982935ee23a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
}
struct nouveau_omthds {
- u32 method;
+ u32 start;
+ u32 limit;
int (*call)(struct nouveau_object *, u32, void *, u32);
};
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
void (*dtor)(struct nouveau_object *);
int (*init)(struct nouveau_object *);
int (*fini)(struct nouveau_object *, bool suspend);
- u8 (*rd08)(struct nouveau_object *, u32 offset);
- u16 (*rd16)(struct nouveau_object *, u32 offset);
- u32 (*rd32)(struct nouveau_object *, u32 offset);
- void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
- void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
- void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+ u8 (*rd08)(struct nouveau_object *, u64 offset);
+ u16 (*rd16)(struct nouveau_object *, u64 offset);
+ u32 (*rd32)(struct nouveau_object *, u64 offset);
+ void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+ void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+ void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
};
static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
void nouveau_object_debug(void);
static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
{
struct nouveau_omthds *method = nv_oclass(obj)->omthds;
while (method && method->call) {
- if (method->method == mthd)
- return method->call(obj, mthd, &data, sizeof(data));
+ if (mthd >= method->start && mthd <= method->limit)
+ return method->call(obj, mthd, data, size);
method++;
}
return -EINVAL;
}
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+ return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
}
static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
}
static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,42 +154,46 @@ nv_ro32(void *obj, u32 addr)
}
static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
{
nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
{
nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
{
nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
{
u32 temp = nv_ro32(obj, addr);
nv_wo32(obj, addr, (temp & ~mask) | data);
return temp;
}
-static inline bool
-nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
+static inline int
+nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
{
+ unsigned char c1, c2;
+
while (len--) {
- if (nv_ro08(obj, addr++) != *(str++))
- return false;
+ c1 = nv_ro08(obj, addr++);
+ c2 = *(str++);
+ if (c1 != c2)
+ return c1 - c2;
}
- return true;
+ return 0;
}
#endif
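
Two semantic changes land in this header alongside the u64 offset widening. First, method dispatch: nv_call() is now a thin wrapper over nv_exec(), and lookup matches mthd against each entry's [start, limit] window instead of requiring exact equality, which is what the omthds table rewrites earlier in the diff rely on. Tracing a call through the nv50 software table above:

	/* nv_call(obj, 0x0404, data)
	 *   -> nv_exec(obj, 0x0404, &data, sizeof(data))
	 *   -> 0x0404 falls inside { 0x0404, 0x0404, nv50_software_mthd_vblsem_value }
	 *   -> nv50_software_mthd_vblsem_value(obj, 0x0404, &data, 4)
	 */

Second, nv_strncmp() becomes nv_memcmp(), returning the signed difference of the first mismatching byte (memcmp convention) rather than a bool, so callers test !nv_memcmp(...) for equality, as the DEV_REC check in subdev/bios/dcb.c below now does.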
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f8..31cd852c96df 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
struct nouveau_object base;
struct nouveau_sclass *sclass;
- u32 engine;
+ u64 engine;
int (*context_attach)(struct nouveau_object *,
struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85fd..13ccdf54dfad 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_BSP_H__
#define __NOUVEAU_BSP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
- struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf5..8cad2cf28cef 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
#ifndef __NOUVEAU_COPY_H__
#define __NOUVEAU_COPY_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
- struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d) \
- nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
extern struct nouveau_oclass nva3_copy_oclass;
extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baaa..db975618e937 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
#ifndef __NOUVEAU_CRYPT_H__
#define __NOUVEAU_CRYPT_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
- struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_crypt_oclass;
extern struct nouveau_oclass nv98_crypt_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cbaa..46948285f3e7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
extern struct nouveau_oclass nv04_disp_oclass;
extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941f..b28914ed1752 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
+ u32 conf0;
};
-#define nouveau_dmaobj_create(p,e,c,a,s,d) \
- nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p) \
- nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
struct nouveau_dmaeng {
struct nouveau_engine base;
- int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
- struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+ /* creates a "physical" dma object from a struct nouveau_dmaobj */
+ int (*bind)(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **);
};
#define nouveau_dmaeng_create(p,e,c,d) \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
extern struct nouveau_oclass nv04_dmaeng_oclass;
extern struct nouveau_oclass nv50_dmaeng_oclass;
extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
#endif
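
The bind() hook is now the dmaeng's whole job: translate the software-side struct nouveau_dmaobj (flags/start/limit plus the new conf0) into a hardware-visible gpuobj in the parent's context. A hedged caller sketch, with error handling trimmed and variable names illustrative:

	struct nouveau_gpuobj *pgpuobj = NULL;
	int ret = dmaeng->bind(dmaeng, parent, dmaobj, &pgpuobj);
	if (ret)
		return ret;
	/* pgpuobj is the "physical" dma object the engine can reference */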
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e3970..f18846c8c6fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *,
struct nouveau_object *,
struct nouveau_oclass *,
int bar, u32 addr, u32 size, u32 push,
- u32 engmask, int len, void **);
+ u64 engmask, int len, void **);
void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_init _nouveau_namedb_init
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb3281..0a66781e8cf1 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
#ifndef __NOUVEAU_PPP_H__
#define __NOUVEAU_PPP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
- struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba377..d7b287b115bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_VP_H__
#define __NOUVEAU_VP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
- struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb625833..b79025da581e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
uint8_t bus;
uint8_t location;
uint8_t or;
+ uint8_t link;
bool duallink_possible;
union {
struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+ struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+ struct dcb_output *);
int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
(struct nouveau_bios *, void *, int index, u16 entry));
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 000000000000..c35937e2f6a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+ u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub,
+ struct nvbios_disp *);
+
+struct nvbios_outp {
+ u16 type;
+ u16 mask;
+ u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+ u16 match;
+ u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75a..6e54218b55fc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+ u16 type;
+ u16 mask;
+ u8 flags;
+ u32 script[5];
+ u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+ u8 drv;
+ u8 pre;
+ u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
#endif
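
The rewritten interface mirrors the new DCB helpers: a caller asks for the DP-table entry whose hash matches a (type, mask) pair and gets the parsed scripts and link-comparison pointer back in struct nvbios_dpout. A hedged usage sketch; the exact type/mask encoding the DP table expects is an assumption modelled on the DCB hash helpers in subdev/bios/dcb.c below:

	struct nvbios_dpout info;
	u8 ver, hdr, cnt, len;
	u16 data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
				      (outp->heads << 8) | (outp->link << 6) | outp->or,
				      &ver, &hdr, &cnt, &len, &info);
	if (data) {
		/* info.script[] / info.lnkcmp describe this encoder */
	}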
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 39e73b91d360..41b7a6a76f19 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
int clk, struct nouveau_pll_vals *);
int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
struct nouveau_pll_vals *);
-
+int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+ int clk, struct nouveau_pll_vals *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f9..da470e6851b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
} type;
u64 stolen;
u64 size;
+
int ranks;
+ int parts;
+ int (*init)(struct nouveau_fb *);
int (*get)(struct nouveau_fb *, u64 size, u32 align,
u32 size_nc, u32 type, struct nouveau_mem **);
void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
int regions;
void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
+ void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
void (*fini)(struct nouveau_fb *, int i,
struct nouveau_fb_tile *);
void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
#define nouveau_fb_create(p,e,c,d) \
nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int nouveau_fb_created(struct nouveau_fb *);
+int nouveau_fb_preinit(struct nouveau_fb *);
void nouveau_fb_destroy(struct nouveau_fb *);
int nouveau_fb_init(struct nouveau_fb *);
#define nouveau_fb_fini(p,s) \
@@ -111,9 +116,19 @@ int _nouveau_fb_init(struct nouveau_object *);
extern struct nouveau_oclass nv04_fb_oclass;
extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
extern struct nouveau_oclass nv50_fb_oclass;
extern struct nouveau_oclass nvc0_fb_oclass;
@@ -122,13 +137,35 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *);
bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+int nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv30_fb_init(struct nouveau_object *);
void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
+
+int nv41_fb_vram_init(struct nouveau_fb *);
+int nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv44_fb_vram_init(struct nouveau_fb *);
+int nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007a..d70ba342aa2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
}
static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_barobj *barobj = (void *)object;
return ioread32_native(barobj->iomem + addr);
}
static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_barobj *barobj = (void *)object;
iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa1..dd111947eb86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
struct pci_dev *pdev = nv_device(bios)->pdev;
struct device_node *dn;
const u32 *data;
- int size, i;
+ int size;
dn = pci_device_to_OF_node(pdev);
if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
return;
bios->data = kmalloc(bios->size, GFP_KERNEL);
- for (i = 0; bios->data && i < bios->size; i += cnt) {
- cnt = min((bios->size - i), (u32)4096);
- ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
- if (ret != cnt)
- break;
+ if (bios->data) {
+ /* disobey the acpi spec - much faster on at least w530 ... */
+ ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+ if (ret != bios->size ||
+ nvbios_checksum(bios->data, bios->size)) {
+ /* ... that didn't work, ok, i'll be good now */
+ for (i = 0; i < bios->size; i += cnt) {
+ cnt = min((bios->size - i), (u32)4096);
+ ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+ if (ret != cnt)
+ break;
+ }
+ }
}
}
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
}
static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return bios->data[addr];
}
static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le16(&bios->data[addr]);
}
static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le32(&bios->data[addr]);
}
static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
struct nouveau_bios *bios = (void *)object;
bios->data[addr] = data;
}
static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le16(data, &bios->data[addr]);
}
static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le32(data, &bios->data[addr]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 7d750382a833..0fd87df99dd6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
} else
if (*ver >= 0x15) {
- if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
+ if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
u16 i2c = nv_ro16(bios, dcb + 2);
*hdr = 4;
*cnt = (i2c - dcb) / 10;
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
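+/* decode a dcb entry's connection/config dwords into a dcb_output struct;
+ * pre-2.0 device records are not handled here and yield 0x0000
+ */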
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct dcb_output *outp)
+{
+ u16 dcb = dcb_outp(bios, idx, ver, len);
+ if (dcb) {
+ if (*ver >= 0x20) {
+ u32 conn = nv_ro32(bios, dcb + 0x00);
+ outp->or = (conn & 0x0f000000) >> 24;
+ outp->location = (conn & 0x00300000) >> 20;
+ outp->bus = (conn & 0x000f0000) >> 16;
+ outp->connector = (conn & 0x0000f000) >> 12;
+ outp->heads = (conn & 0x00000f00) >> 8;
+ outp->i2c_index = (conn & 0x000000f0) >> 4;
+ outp->type = (conn & 0x0000000f);
+ outp->link = 0;
+ } else {
+ dcb = 0x0000;
+ }
+
+ if (*ver >= 0x40) {
+ u32 conf = nv_ro32(bios, dcb + 0x04);
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ outp->link = (conf & 0x00000030) >> 4;
+ outp->sorconf.link = outp->link; /*XXX*/
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return dcb;
+}
+
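+/* matching keys: hasht is the dcb output type, hashm packs the heads
+ * (bits 8-11), link (bits 6-7) and or (bits 0-3) fields into one word
+ */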
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+ return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
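+/* return the first dcb entry of 'type' whose hash covers every bit in 'mask' */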
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *len, struct dcb_output *outp)
+{
+ u16 dcb, idx = 0;
+ while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+ if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hashm(outp) & mask) == mask)
+ break;
+ }
+ }
+ return dcb;
+}
+
int
dcb_outp_foreach(struct nouveau_bios *bios, void *data,
int (*exec)(struct nouveau_bios *, void *, int, u16))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 000000000000..7f16e52d9bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
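+/* the display script table is located through BIT table entry 'U'; only
+ * versions 0x20 and 0x21 are recognised here
+ */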
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+ struct bit_entry U;
+
+ if (!bit_entry(bios, 'U', &U)) {
+ if (U.version == 1) {
+ u16 data = nv_ro16(bios, U.offset);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x20:
+ case 0x21:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ *sub = nv_ro08(bios, data + 0x04);
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub)
+{
+ u8 hdr, cnt;
+ u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+ if (data && idx < cnt)
+ return data + hdr + (idx * *len);
+ *ver = 0x00;
+ return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub,
+ struct nvbios_disp *info)
+{
+ u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+ if (data && *len >= 2) {
+ info->data = nv_ro16(bios, data + 0);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct nvbios_disp info;
+ u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+ if (data) {
+ *cnt = nv_ro08(bios, info.data + 0x05);
+ *len = 0x06;
+ data = info.data;
+ }
+ return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *hdr >= 0x0a) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro32(bios, data + 0x02);
+ if (*ver <= 0x20) /* match any link */
+ info->mask |= 0x00c0;
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->script[2] = 0x0000;
+ if (*hdr >= 0x0c)
+ info->script[2] = nv_ro16(bios, data + 0x0a);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+ return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ info->match = nv_ro16(bios, data + 0x00);
+ info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+ info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+ if (info->match == type)
+ break;
+ }
+ return data;
+}
+
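+/* walk a clock-comparison list of 4-byte entries, each pairing a clock
+ * threshold (in 10s of kHz) with a script pointer; presumably terminated
+ * by a zero threshold that matches any clock
+ */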
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+ while (cmp) {
+ if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+ return nv_ro16(bios, cmp + 0x02);
+ cmp += 0x04;
+ }
+ return 0x0000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5e..663853bcca82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
#include "subdev/bios.h"
#include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
#include "subdev/bios/dp.h"
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- struct bit_entry bit_d;
+ struct bit_entry d;
- if (!bit_entry(bios, 'd', &bit_d)) {
- if (bit_d.version == 1) {
- u16 data = nv_ro16(bios, bit_d.offset);
+ if (!bit_entry(bios, 'd', &d)) {
+ if (d.version == 1 && d.length >= 2) {
+ u16 data = nv_ro16(bios, d.offset);
if (data) {
- *ver = nv_ro08(bios, data + 0);
- *hdr = nv_ro08(bios, data + 1);
- *len = nv_ro08(bios, data + 2);
- *cnt = nv_ro08(bios, data + 3);
- return data;
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ return data;
+ default:
+ break;
+ }
}
}
}
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
return 0x0000;
}
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
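+ /* multiplying by !!outp folds an empty entry (outp == 0) into the
+ * default case below */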
+ switch (*ver * !!outp) {
+ case 0x21:
+ case 0x30:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *len = nv_ro08(bios, data + 0x05);
+ *cnt = nv_ro08(bios, outp + 0x04);
+ break;
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *cnt = 0;
+ *len = 0;
+ break;
+ default:
+ break;
+ }
+ return outp;
+ }
+ *ver = 0x00;
+ return 0x0000;
+}
+
u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 hdr, cnt;
- u16 table = dp_table(bios, ver, &hdr, &cnt, len);
- if (table && idx < cnt)
- return nv_ro16(bios, table + hdr + (idx * *len));
- return 0xffff;
+ u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *ver) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro16(bios, data + 0x02);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ info->flags = nv_ro08(bios, data + 0x05);
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->lnkcmp = nv_ro16(bios, data + 0x0a);
+ info->script[2] = nv_ro16(bios, data + 0x0c);
+ info->script[3] = nv_ro16(bios, data + 0x0e);
+ info->script[4] = nv_ro16(bios, data + 0x10);
+ break;
+ case 0x40:
+ info->flags = nv_ro08(bios, data + 0x04);
+ info->script[0] = nv_ro16(bios, data + 0x05);
+ info->script[1] = nv_ro16(bios, data + 0x07);
+ info->lnkcmp = nv_ro16(bios, data + 0x09);
+ info->script[2] = nv_ro16(bios, data + 0x0b);
+ info->script[3] = nv_ro16(bios, data + 0x0d);
+ info->script[4] = nv_ro16(bios, data + 0x0f);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
}
u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
- u8 *ver, u8 *len)
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 idx = 0;
- u16 data;
- while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
- if (data) {
- u32 hash = nv_ro32(bios, data);
- if (dcb_hash_match(outp, hash))
- return data;
+ u16 data, idx = 0;
+ while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
}
}
+ return data;
+}
+
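+/* in v4.0 tables the drive/pre-emphasis settings sit in the main table,
+ * after the output entries, so re-fetch it and step past them
+ */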
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (*ver >= 0x40) {
+ outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ *hdr = *hdr + (*len * *cnt);
+ *len = nv_ro08(bios, outp + 0x06);
+ *cnt = nv_ro08(bios, outp + 0x07);
+ }
+
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+
return 0x0000;
}
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ switch (*ver) {
+ case 0x21:
+ info->drv = nv_ro08(bios, data + 0x02);
+ info->pre = nv_ro08(bios, data + 0x03);
+ info->unk = nv_ro08(bios, data + 0x04);
+ break;
+ case 0x30:
+ case 0x40:
+ info->drv = nv_ro08(bios, data + 0x01);
+ info->pre = nv_ro08(bios, data + 0x02);
+ info->unk = nv_ro08(bios, data + 0x03);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u8 idx = 0xff;
+ u16 data;
+
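+ /* v3.0+ indexes settings directly; the vsoff table appears to give the
+ * entry offset of each voltage-swing group within a block of 10 */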
+ if (*ver >= 0x30) {
+ const u8 vsoff[] = { 0, 4, 7, 9 };
+ idx = (un * 10) + vsoff[vs] + pe;
+ } else {
+ while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
+ ver, hdr, cnt, len))) {
+ if (nv_ro08(bios, data + 0x00) == vs &&
+ nv_ro08(bios, data + 0x01) == pe)
+ break;
+ }
+ }
+
+ return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e508165..c90d4aa3ae4f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
}
/* DCB 2.2, fixed TVDAC GPIO data */
- if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
- if (func == DCB_GPIO_TVDAC0) {
+ if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
+ if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
u8 conf = nv_ro08(bios, entry - 5);
u8 addr = nv_ro08(bios, entry - 4);
if (conf & 0x01) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4c..ae168bbb86d8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -743,9 +743,10 @@ static void
init_dp_condition(struct nvbios_init *init)
{
struct nouveau_bios *bios = init->bios;
+ struct nvbios_dpout info;
u8 cond = nv_ro08(bios, init->offset + 1);
u8 unkn = nv_ro08(bios, init->offset + 2);
- u8 ver, len;
+ u8 ver, hdr, cnt, len;
u16 data;
trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +760,12 @@ init_dp_condition(struct nvbios_init *init)
case 1:
case 2:
if ( init->outp &&
- (data = dp_outp_match(bios, init->outp, &ver, &len))) {
- if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
- init_exec_set(init, false);
- if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+ (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+ (init->outp->or << 0) |
+ (init->outp->sorconf.link << 6),
+ &ver, &hdr, &cnt, &len, &info)))
+ {
+ if (!(info.flags & cond))
init_exec_set(init, false);
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index cc8d7d162d7c..9068c98b96f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
return ret;
}
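+/* also used by nvc0: run the pll coefficient calculator and store the
+ * results in the legacy pll_vals layout
+ */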
+int
+nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+ int clk, struct nouveau_pll_vals *pv)
+{
+ int ret, N, M, P;
+
+ ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
+
+ if (ret > 0) {
+ pv->refclk = info->refclk;
+ pv->N1 = N;
+ pv->M1 = M;
+ pv->log2P = P;
+ }
+ return ret;
+}
+
+
static int
nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.pll_set = nva3_clock_pll_set;
+ priv->base.pll_calc = nva3_clock_pll_calc;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 5ccce0b17bf3..f6962c9b6c36 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.pll_set = nvc0_clock_pll_set;
+ priv->base.pll_calc = nva3_clock_pll_calc;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8a..f8a7ed4166cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
-#include <core/device.h>
#include <core/option.h>
#include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
static const u64 disable_map[] = {
[NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
[NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
[NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
[NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
[NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
};
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* determine frequency of timing crystal */
if ( device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset <= 0x25))
+ (device->chipset >= 0x20 && device->chipset < 0x25))
strap &= 0x00000040;
else
strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
}
static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
return nv_rd08(object->engine, addr);
}
static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
return nv_rd16(object->engine, addr);
}
static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object->engine, addr);
}
static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
nv_wr08(object->engine, addr, data);
}
static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
nv_wr16(object->engine, addr, data);
}
static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
nv_wr32(object->engine, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e31..9c40b0fb23f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b5..74f88f48e1c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6cccc..0ac1b2c4f61d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0a..41d59689a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6fc..6ccfd8585ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x86:
device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x92:
device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x94:
device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x96:
device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x98:
device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa0:
device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
break;
case 0xaa:
device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xac:
device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa3:
device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa5:
device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa8:
device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xaf:
device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c293..f0461685a422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc4:
device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc3:
device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xce:
device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xcf:
device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc1:
device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc8:
device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xd9:
device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab853..9b7881e76634 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
#include <engine/graph.h>
#include <engine/disp.h>
#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
int
nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
case 0xe7:
device->cname = "GK107";
@@ -92,13 +98,16 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e9..ae7249b09797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/vga.h>
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
static int
nv50_devinit_init(struct nouveau_object *object)
{
+ struct nouveau_bios *bios = nouveau_bios(object);
struct nv50_devinit_priv *priv = (void *)object;
+ struct nvbios_outp info;
+ struct dcb_output outp;
+ u8 ver = 0xff, hdr, cnt, len;
+ int ret, i = 0;
if (!priv->base.post) {
if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
}
}
- return nouveau_devinit_init(&priv->base);
+ ret = nouveau_devinit_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* if we ran the init tables, execute first script pointer for each
+ * display table output entry that has a matching dcb entry.
+ */
+ while (priv->base.post && ver) {
+ u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+ if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[0],
+ .outp = &outp,
+ .crtc = -1,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ };
+
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af31..d6d16007ec1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
}
int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
{
- int ret, i;
+ static const char *name[] = {
+ [NV_MEM_TYPE_UNKNOWN] = "unknown",
+ [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+ [NV_MEM_TYPE_SGRAM ] = "SGRAM",
+ [NV_MEM_TYPE_SDRAM ] = "SDRAM",
+ [NV_MEM_TYPE_DDR1 ] = "DDR1",
+ [NV_MEM_TYPE_DDR2 ] = "DDR2",
+ [NV_MEM_TYPE_DDR3 ] = "DDR3",
+ [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
+ [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
+ [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
+ [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
+ };
+ int ret, tags;
- ret = nouveau_subdev_init(&pfb->base);
- if (ret)
- return ret;
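+ /* ram.init() probes the memory configuration, filling pfb->ram and
+ * returning the number of zcomp tags (or -errno) */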
+ tags = pfb->ram.init(pfb);
+ if (tags < 0 || !pfb->ram.size) {
+ nv_fatal(pfb, "error detecting memory configuration!!\n");
+ return (tags < 0) ? tags : -ERANGE;
+ }
- for (i = 0; i < pfb->tile.regions; i++)
- pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+ if (!nouveau_mm_initialised(&pfb->vram)) {
+ ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+ if (ret)
+ return ret;
+ }
- return 0;
-}
+ if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+ ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+ if (ret)
+ return ret;
+ }
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
- struct nouveau_fb *pfb = (void *)object;
- return nouveau_fb_init(pfb);
+ nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+ nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+ nv_info(pfb, " ZCOMP: %d tags\n", tags);
+ return 0;
}
void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
- if (pfb->tags.block_size)
- nouveau_mm_fini(&pfb->tags);
-
- if (pfb->vram.block_size)
- nouveau_mm_fini(&pfb->vram);
+ nouveau_mm_fini(&pfb->tags);
+ nouveau_mm_fini(&pfb->vram);
nouveau_subdev_destroy(&pfb->base);
}
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
struct nouveau_fb *pfb = (void *)object;
nouveau_fb_destroy(pfb);
}
-
int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
{
- static const char *name[] = {
- [NV_MEM_TYPE_UNKNOWN] = "unknown",
- [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
- [NV_MEM_TYPE_SGRAM ] = "SGRAM",
- [NV_MEM_TYPE_SDRAM ] = "SDRAM",
- [NV_MEM_TYPE_DDR1 ] = "DDR1",
- [NV_MEM_TYPE_DDR2 ] = "DDR2",
- [NV_MEM_TYPE_DDR3 ] = "DDR3",
- [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
- [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
- [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
- [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
- };
+ int ret, i;
- if (pfb->ram.size == 0) {
- nv_fatal(pfb, "no vram detected!!\n");
- return -ERANGE;
- }
+ ret = nouveau_subdev_init(&pfb->base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pfb->tile.regions; i++)
+ pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
- nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
- nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
return 0;
}
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object;
+ return nouveau_fb_init(pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f7..6e369f85361e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
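+ /* bit 8 set selects the extended encoding: size is (bits 12-15) * 2 + 2 MiB */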
+ if (boot0 & 0x00000100) {
+ pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ pfb->ram.size *= 1024 * 1024;
+ } else {
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ pfb->ram.size = 32 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ pfb->ram.size = 16 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ pfb->ram.size = 8 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ pfb->ram.size = 4 * 1024 * 1024;
+ break;
+ }
+ }
+
+ if ((boot0 & 0x00000038) <= 0x10)
+ pfb->ram.type = NV_MEM_TYPE_SGRAM;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+ return 0;
+}
+
+static int
nv04_fb_init(struct nouveau_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv04_fb_priv *priv;
- u32 boot0;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
- if (boot0 & 0x00000100) {
- priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
- priv->base.ram.size *= 1024 * 1024;
- } else {
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- priv->base.ram.size = 32 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- priv->base.ram.size = 16 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- priv->base.ram.size = 8 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- priv->base.ram.size = 4 * 1024 * 1024;
- break;
- }
- }
-
- if ((boot0 & 0x00000038) <= 0x10)
- priv->base.ram.type = NV_MEM_TYPE_SGRAM;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- return nouveau_fb_created(&priv->base);
+ priv->base.ram.init = nv04_fb_vram_init;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f4..edbbe26e858d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
struct nouveau_fb base;
};
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 cfg0 = nv_rd32(pfb, 0x100200);
+ if (cfg0 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+void
nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-static void
+void
nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
}
static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv10_fb_priv *priv;
int ret;
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- if (device->chipset == 0x1a || device->chipset == 0x1f) {
- struct pci_dev *bridge;
- u32 mem, mib;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- nv_fatal(device, "no bridge device\n");
- return 0;
- }
-
- if (device->chipset == 0x1a) {
- pci_read_config_dword(bridge, 0x7c, &mem);
- mib = ((mem >> 6) & 31) + 1;
- } else {
- pci_read_config_dword(bridge, 0x84, &mem);
- mib = ((mem >> 4) & 127) + 1;
- }
-
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- priv->base.ram.size = mib * 1024 * 1024;
- } else {
- u32 cfg0 = nv_rd32(priv, 0x100200);
- if (cfg0 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
- }
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv10_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv10_fb_tile_init;
priv->base.tile.fini = nv10_fb_tile_fini;
priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 000000000000..48366841db4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct pci_dev *bridge;
+ u32 mem, mib;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ nv_fatal(pfb, "no bridge device\n");
+ return -ENODEV;
+ }
+
+ if (nv_device(pfb)->chipset == 0x1a) {
+ pci_read_config_dword(bridge, 0x7c, &mem);
+ mib = ((mem >> 6) & 31) + 1;
+ } else {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ mib = ((mem >> 4) & 127) + 1;
+ }
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.size = mib * 1024 * 1024;
+ return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv1a_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv1a_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv10_fb_tile_init;
+ priv->base.tile.fini = nv10_fb_tile_fini;
+ priv->base.tile.prog = nv10_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x1a),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv1a_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
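
The stolen-memory math in nv1a_fb_vram_init() decodes a size field from the host bridge's config space: a 5-bit field at bit 6 of dword 0x7c on nv1a (1-32 MiB), and a 7-bit field at bit 4 of dword 0x84 on nv1f (1-128 MiB). A worked example with made-up config-space values:

/* Worked example of the nv1a/nv1f stolen-memory decode above;
 * both config dwords are invented values, not real hardware reads. */
#include <stdio.h>

int main(void)
{
	unsigned int mem_1a = 0x000007c0; /* hypothetical dword at 0x7c */
	unsigned int mem_1f = 0x000003f0; /* hypothetical dword at 0x84 */

	unsigned int mib_1a = ((mem_1a >> 6) & 31) + 1;  /* -> 32 MiB */
	unsigned int mib_1f = ((mem_1f >> 4) & 127) + 1; /* -> 64 MiB */

	printf("nv1a: %u MiB, nv1f: %u MiB\n", mib_1a, mib_1f);
	return 0;
}
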
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7fb..5d14612a2c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
struct nouveau_fb base;
};
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+ }
+ pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- struct nouveau_device *device = nv_device(pfb);
- int bpp = (flags & 2) ? 32 : 16;
-
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
-
- /* Allocate some of the on-die tag memory, used to store Z
- * compression meta-data (most likely just a bitmap determining
- * if a given tile is compressed or not).
- */
- size /= 256;
if (flags & 4) {
- if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
- /* Enable Z compression */
- tile->zcomp = tile->tag->offset;
- if (device->chipset >= 0x25) {
- if (bpp == 16)
- tile->zcomp |= 0x00100000;
- else
- tile->zcomp |= 0x00200000;
- } else {
- tile->zcomp |= 0x80000000;
- if (bpp != 16)
- tile->zcomp |= 0x04000000;
- }
- }
-
+ pfb->tile.comp(pfb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+ else tile->zcomp = 0x04000000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+ tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x08000000;
+#endif
+ }
+}
+
+void
nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nouveau_mm_free(&pfb->tags, &tile->tag);
}
-static void
+void
nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv20_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
- if (device->chipset >= 0x25)
- ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
- else
- ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
- if (ret)
- return ret;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv20_fb_tile_comp;
priv->base.tile.fini = nv20_fb_tile_fini;
priv->base.tile.prog = nv20_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
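
On the tag sizing in nv20_fb_tile_comp(): one compression tag covers a 64-byte tile, and the per-region allocation is the tile count divided across the memory partitions, rounded up to a 64-tag boundary. A worked example with invented inputs (the macros approximate the kernel helpers of the same names):

/* Tag sizing as in nv20_fb_tile_comp(); region size and partition
 * count are made-up inputs. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define round_up(x, y)     (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	unsigned int size  = 1 << 20;                       /* 1 MiB region */
	unsigned int parts = 2;                             /* partitions */
	unsigned int tiles = DIV_ROUND_UP(size, 0x40);      /* 16384 tiles */
	unsigned int tags  = round_up(tiles / parts, 0x40); /* 8192 tags */

	printf("tiles=%u tags=%u\n", tiles, tags);
	return 0;
}
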
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 000000000000..0042ace6bef9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+ else tile->zcomp = 0x00200000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x01000000;
+#endif
+ }
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv25_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv25_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x25),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv25_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc91390..a7ba0d048aec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- tile->addr = addr | 1;
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) {
+ tile->addr = (0 << 4);
+ } else {
+ if (pfb->tile.comp) /* z compression */
+ pfb->tile.comp(pfb, i, size, flags, tile);
+ tile->addr = (1 << 4);
+ }
+
+ tile->addr |= 0x00000001; /* enable */
+ tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- tile->addr = 0;
- tile->limit = 0;
- tile->pitch = 0;
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+ else tile->zcomp |= 0x02000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x10000000;
+#endif
+ }
}
static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
return x;
}
-static int
+int
nv30_fb_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv30_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv30_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
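
The "alternate bank offset" comment in nv30_fb_tile_init() refers to bit 4 of the tile address word: zeta regions set it, presumably so colour and depth traffic land in different DRAM banks, and bit 0 enables the region. How the word is composed, with arbitrary example values:

/* Composition of tile->addr as in nv30_fb_tile_init(); the address
 * and flags values are arbitrary examples. */
#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x00100000; /* region base */
	unsigned int flags = 4;         /* zeta, per the flag test above */
	unsigned int tile_addr;

	tile_addr  = (flags & 4) ? (1 << 4) : (0 << 4); /* bank offset */
	tile_addr |= 0x00000001;                        /* enable */
	tile_addr |= addr;

	printf("tile->addr = 0x%08x\n", tile_addr); /* 0x00100011 */
	return 0;
}
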
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 000000000000..092f6f4f3521
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+ else tile->zcomp |= 0x08000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv35_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv35_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x35),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv35_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 000000000000..797ab3b821b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+ else tile->zcomp |= 0x20000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x80000000;
+#endif
+ }
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv36_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv36_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x36),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv36_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad8..65e131b90f37 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
struct nouveau_fb base;
};
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
- if ((device->chipset & 0xf0) == 0x60)
- return 1;
-
- return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
{
- nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ }
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
- nv_wr32(priv, 0x100800, 0x00000001);
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
}
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- nv_wr32(priv, 0x100850, 0x80000000);
- nv_wr32(priv, 0x100800, 0x00000001);
+ u32 tiles = DIV_ROUND_UP(size, 0x80);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x100);
+ if ( (flags & 2) &&
+ !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
+ tile->zcomp |= ((tile->tag->offset ) >> 8);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
}
static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
if (ret)
return ret;
- switch (nv_device(priv)->chipset) {
- case 0x40:
- case 0x45:
- nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
- break;
- default:
- if (nv44_graph_class(nv_device(priv)))
- nv44_fb_init_gart(priv);
- else
- nv40_fb_init_gart(priv);
- break;
- }
-
+ nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
return 0;
}
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv40_fb_priv *priv;
int ret;
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- /* 0x001218 is actually present on a few other NV4X I looked at,
- * and even contains sane values matching 0x100474. From looking
- * at various vbios images however, this isn't the case everywhere.
- * So, I chose to use the same regs I've seen NVIDIA reading around
- * the memory detection, hopefully that'll get us the right numbers
- */
- if (device->chipset == 0x40) {
- u32 pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- }
- } else
- if (device->chipset == 0x49 || device->chipset == 0x4b) {
- u32 pfb914 = nv_rd32(priv, 0x100914);
- switch (pfb914 & 0x00000003) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000003: break;
- }
- } else
- if (device->chipset != 0x4e) {
- u32 pfb474 = nv_rd32(priv, 0x100474);
- if (pfb474 & 0x00000004)
- priv->base.ram.type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- } else {
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- switch (device->chipset) {
- case 0x40:
- case 0x45:
- priv->base.tile.regions = 8;
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- case 0x4c:
- priv->base.tile.regions = 15;
- break;
- default:
- priv->base.tile.regions = 12;
- break;
- }
+ priv->base.ram.init = nv40_fb_vram_init;
+ priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- if (device->chipset == 0x40)
- priv->base.tile.prog = nv10_fb_tile_prog;
- else
- priv->base.tile.prog = nv40_fb_tile_prog;
-
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
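
Worth noting for nv40.c: the chipset switches that used to pick tile.regions and the tile_prog implementation are gone, replaced by the dedicated nv41/nv44/nv46/nv47/nv49/nv4e subdev classes added below, each carrying its own settings. A sketch of the table-driven idea with illustrative types; the region counts match the new per-chipset files:

/* Sketch of per-chipset dispatch replacing in-constructor switches.
 * Types and lookup are illustrative only. */
#include <stdio.h>
#include <stddef.h>

struct fb_impl {
	unsigned int chipset;
	const char  *name;
	int          tile_regions;
};

static const struct fb_impl impls[] = {
	{ 0x40, "nv40", 8  },
	{ 0x41, "nv41", 12 },
	{ 0x44, "nv44", 12 },
	{ 0x46, "nv46", 15 },
};

static const struct fb_impl *
fb_lookup(unsigned int chipset)
{
	size_t i;
	for (i = 0; i < sizeof(impls) / sizeof(impls[0]); i++)
		if (impls[i].chipset == chipset)
			return &impls[i];
	return NULL;
}

int main(void)
{
	const struct fb_impl *impl = fb_lookup(0x44);
	if (impl)
		printf("%s: %d tile regions\n", impl->name, impl->tile_regions);
	return 0;
}
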
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 000000000000..e9e5a08c41a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+ nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+ struct nv41_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv41_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x41),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv41_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
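
The nv_rd32() that now follows the last nv_wr32() in each tile_prog hook (here in nv41_fb_tile_prog(), and likewise in the nv10/nv20/nv44 variants above) looks like the standard posted-write flush: reading the register back forces buffered MMIO writes out to the device before the CPU continues. The generic kernel form of the idiom, as a sketch assuming an ioremap()ed window:

/* Posted-write flush idiom (kernel context); "base", "reg" and "val"
 * are caller-supplied assumptions for this sketch. */
#include <linux/types.h>
#include <linux/io.h>

static void mmio_write_flushed(void __iomem *base, u32 reg, u32 val)
{
	writel(val, base + reg); /* may sit in a posted-write buffer */
	(void)readl(base + reg); /* read-back forces completion */
}
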
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 000000000000..ae89b5006f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ tile->addr = 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+ struct nv44_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100850, 0x80000000);
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv44_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv44_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x44),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv44_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 000000000000..589b93ea2994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+ struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) tile->addr = (0 << 3);
+ else tile->addr = (1 << 3);
+
+ tile->addr |= 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv46_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x46),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv46_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 000000000000..818bba35b368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv47_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x47),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv47_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 000000000000..84a31af16ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+ switch (pfb914 & 0x00000003) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000003: break;
+ }
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv49_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv49_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x49),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv49_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 000000000000..797fd558170b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv4e_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143a..487cb8c6c204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
return types[(memtype & 0xff00) >> 8] != 0;
}
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(pfb, 0x100200);
+ r4 = nv_rd32(pfb, 0x100204);
+ rt = nv_rd32(pfb, 0x100250);
+ ru = nv_rd32(pfb, 0x001540);
+ nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != pfb->ram.size) {
+ nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+ (u32)(pfb->ram.size >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nouveau_bios *bios = nouveau_bios(device);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 size;
+ int ret;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c);
+ pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+ ((pfb->ram.size & 0x000000ff) << 32);
+
+ size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ switch (device->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf: /* IGPs, no reordering, no real VRAM */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+ if (ret)
+ return ret;
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ break;
+ default:
+ switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+ case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+ pfb->ram.type = NV_MEM_TYPE_DDR3;
+ else
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ break;
+ case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+ case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+ default:
+ break;
+ }
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+ nv50_fb_vram_rblock(pfb) >> 12);
+ if (ret)
+ return ret;
+
+ pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+ break;
+ }
+
+ return nv_rd32(pfb, 0x100320);
+}
+
static int
nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
kfree(mem);
}
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru, rblock_size;
-
- r0 = nv_rd32(priv, 0x100200);
- r4 = nv_rd32(priv, 0x100204);
- rt = nv_rd32(priv, 0x100250);
- ru = nv_rd32(priv, 0x001540);
- nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != priv->base.ram.size) {
- nv_warn(priv, "memory controller reports %d MiB VRAM\n",
- (u32)(priv->base.ram.size >> 20));
- }
-
- rblock_size = rowsize;
- if (rt & 1)
- rblock_size *= 3;
-
- nv_debug(priv, "rblock %d bytes\n", rblock_size);
- return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_bios *bios = nouveau_bios(device);
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- struct nv50_fb_priv *priv;
- u32 tags;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- switch (nv_rd32(priv, 0x100714) & 0x00000007) {
- case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
- priv->base.ram.type = NV_MEM_TYPE_DDR3;
- else
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- break;
- case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
- case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c);
- priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
- ((priv->base.ram.size & 0x000000ff) << 32);
-
- tags = nv_rd32(priv, 0x100320);
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
-
- nv_debug(priv, "%d compression tags\n", tags);
-
- size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- switch (device->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf: /* IGPs, no reordering, no real VRAM */
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
- if (ret)
- return ret;
-
- priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- break;
- default:
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
- nv50_vram_rblock(priv) >> 12);
- if (ret)
- return ret;
-
- priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
- break;
- }
-
- priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (priv->r100c08_page) {
- priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c08))
- nv_warn(priv, "failed 0x100c08 page map\n");
- } else {
- nv_warn(priv, "failed 0x100c08 page alloc\n");
- }
-
- priv->base.memtype_valid = nv50_fb_memtype_valid;
- priv->base.ram.get = nv50_fb_vram_new;
- priv->base.ram.put = nv50_fb_vram_del;
- return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
-
- if (priv->r100c08_page) {
- pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- }
-
- nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_fb_init(&priv->base);
- if (ret)
- return ret;
-
- /* Not a clue what this is exactly. Without pointing it at a
- * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
- * cause IOMMU "read from address 0" errors (rh#561267)
- */
- nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
- switch (device->chipset) {
- case 0x50:
- nv_wr32(priv, 0x100c90, 0x000707ff);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_wr32(priv, 0x100c90, 0x000d0fff);
- break;
- case 0xaf:
- nv_wr32(priv, 0x100c90, 0x089d1fff);
- break;
- default:
- nv_wr32(priv, 0x100c90, 0x001d07ff);
- break;
- }
-
- return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
- },
-};
-
static const struct nouveau_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX", NULL },
{ 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
{}
};
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nv50_fb_priv *priv = (void *)pfb;
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
}
nv_wr32(priv, 0x100c90, idx | 0x80000000);
- if (!display)
- return;
-
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
else
printk("0x%08x\n", st1);
}
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (priv->r100c08_page) {
+ priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+ nv_warn(priv, "failed 0x100c08 page map\n");
+ } else {
+ nv_warn(priv, "failed 0x100c08 page alloc\n");
+ }
+
+ priv->base.memtype_valid = nv50_fb_memtype_valid;
+ priv->base.ram.init = nv50_fb_vram_init;
+ priv->base.ram.get = nv50_fb_vram_new;
+ priv->base.ram.put = nv50_fb_vram_del;
+ nv_subdev(priv)->intr = nv50_fb_intr;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ switch (device->chipset) {
+ case 0x50:
+ nv_wr32(priv, 0x100c90, 0x000707ff);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(priv, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(priv, 0x100c90, 0x089d1fff);
+ break;
+ default:
+ nv_wr32(priv, 0x100c90, 0x001d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
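
The row/reorder-block arithmetic in nv50_fb_vram_rblock() multiplies partitions, banks and column size (8 bytes per column entry) into a row size, then predicts total VRAM from the row bits as a sanity check against the 0x10020c size register. A worked example with invented field values:

/* nv50 rblock math with made-up register fields: 4 partitions,
 * 4 banks, 10 column bits, 13 row bits. */
#include <stdio.h>

int main(void)
{
	unsigned int parts = 4, banks = 4, colbits = 10, rowbitsa = 13;
	unsigned long long rowsize, predicted;

	rowsize   = (unsigned long long)parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;

	/* rowsize = 131072 bytes (128 KiB); predicted = 1024 MiB. With the
	 * 0x100250 low bit set, the reorder block would be 3 * rowsize. */
	printf("rowsize=%llu KiB predicted=%llu MiB\n",
	       rowsize >> 10, predicted >> 20);
	return 0;
}
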
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf0079..306bdf121452 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 parts = nv_rd32(pfb, 0x022438);
+ u32 pmask = nv_rd32(pfb, 0x022554);
+ u32 bsize = nv_rd32(pfb, 0x10f20c);
+ u32 offset, length;
+ bool uniform = true;
+ int ret, part;
+
+ nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+ nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+ pfb->ram.type = nouveau_fb_bios_memtype(bios);
+ pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+ /* read amount of vram attached to each memory controller */
+ for (part = 0; part < parts; part++) {
+ if (!(pmask & (1 << part))) {
+ u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+ pfb->ram.size += (u64)psize << 20;
+ }
+ }
+
+ /* if all controllers have the same amount attached, there are no holes */
+ if (uniform) {
+ offset = rsvd_head;
+ length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ return nouveau_mm_init(&pfb->vram, offset, length, 1);
+ }
+
+ /* otherwise, address lowest common amount from 0GiB */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+ if (ret)
+ return ret;
+
+ /* and the rest starting from (8GiB + common_size) */
+ offset = (0x0200000000ULL >> 12) + (bsize << 8);
+ length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+ if (ret) {
+ nouveau_mm_fini(&pfb->vram);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
{
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
}
static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nouveau_fb *pfb = &priv->base;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(priv, 0x022438);
- u32 pmask = nv_rd32(priv, 0x022554);
- u32 bsize = nv_rd32(priv, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
-
- nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
- nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
- priv->base.ram.type = nouveau_fb_bios_memtype(bios);
- priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
- /* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
- priv->base.ram.size += (u64)psize << 20;
- }
- }
-
- /* if all controllers have the same amount attached, there's no holes */
- if (uniform) {
- offset = rsvd_head;
- length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- return nouveau_mm_init(&pfb->vram, offset, length, 1);
- }
-
- /* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
- if (ret)
- return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
- ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
- if (ret) {
- nouveau_mm_fini(&pfb->vram);
- return ret;
- }
-
- return 0;
-}
-
-static int
nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.memtype_valid = nvc0_fb_memtype_valid;
+ priv->base.ram.init = nvc0_fb_vram_init;
priv->base.ram.get = nvc0_fb_vram_new;
priv->base.ram.put = nv50_fb_vram_del;
- ret = nvc0_vram_detect(priv);
- if (ret)
- return ret;
-
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c10_page)
return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (pci_dma_mapping_error(device->pdev, priv->r100c10))
return -EFAULT;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba9..dc27e794a851 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
udelay(1);
if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x", ctrl);
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
return -EBUSY;
}
} while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b50368..f5bbd3834116 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instobj_priv *node = (void *)object;
return nv_ro32(object->engine, node->mem->offset + addr);
}
static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instobj_priv *node = (void *)object;
nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
}
static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object, 0x700000 + addr);
}
static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
return nv_wr32(object, 0x700000 + addr, data);
}
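
The u32 -> u64 widening of the address argument here (and in the nv40/nv50
hunks below) tracks the object accessor hooks these functions are plugged
into; a sketch of the assumed member signatures, with the struct name and
layout illustrative rather than taken from the nouveau core headers:

#include <stdint.h>
typedef uint32_t u32;
typedef uint64_t u64;

struct nouveau_object;

/* assumed shape of the relevant accessor members after this series */
struct ofuncs_sketch {
	u32  (*rd32)(struct nouveau_object *, u64 addr);
	void (*wr32)(struct nouveau_object *, u64 addr, u32 data);
};
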
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd5932..da64253201ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instmem_priv *priv = (void *)object;
return ioread32_native(priv->iomem + addr);
}
static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instmem_priv *priv = (void *)object;
iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10b..cfc7e31461de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
}
static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c2..8379aafa6e1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
struct nouveau_mc *pmc = nouveau_mc(subdev);
const struct nouveau_mc_intr *map = pmc->intr_map;
struct nouveau_subdev *unit;
- u32 stat;
+ u32 stat, intr;
- stat = nv_rd32(pmc, 0x000100);
+ intr = stat = nv_rd32(pmc, 0x000100);
while (stat && map->stat) {
if (stat & map->stat) {
unit = nouveau_subdev(subdev, map->unit);
if (unit && unit->intr)
unit->intr(unit);
- stat &= ~map->stat;
+ intr &= ~map->stat;
}
map++;
}
- if (stat) {
+ if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
}
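
The stat/intr split above matters because map entries may share bits: on nv98
the new FB entry 0x0040d101 (added below) overlaps COPY0's 0x00400000, so the
loop walks the unmodified stat to dispatch every matching unit, while handled
bits are cleared only from intr so that anything left over really is unknown.
A minimal standalone sketch of that behaviour, with pretend register values:

#include <stdio.h>

struct intr_map { unsigned stat; const char *unit; };

int main(void)
{
	static const struct intr_map map[] = {
		{ 0x00400000, "COPY0" },
		{ 0x0040d101, "FB" },	/* shares bit 22 with COPY0 */
		{ 0, 0 },
	};
	const struct intr_map *m = map;
	unsigned stat, intr;

	intr = stat = 0x80400000;	/* pretend register read */
	while (stat && m->stat) {
		if (stat & m->stat) {
			printf("dispatch %s\n", m->unit);
			intr &= ~m->stat;
		}
		m++;
	}
	if (intr)	/* 0x80000000 survives: claimed by no entry */
		printf("unknown intr 0x%08x\n", stat);
	return 0;
}
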
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b02977..8d759f830323 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0000d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38d..ceb5c83f9459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0040d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17d..92796682722d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 49050d991e75..9474cfca6e4c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -67,7 +67,7 @@ nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
static void
nv41_vm_flush(struct nouveau_vm *vm)
{
- struct nv04_vm_priv *priv = (void *)vm->vmm;
+ struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
mutex_lock(&nv_subdev(priv)->mutex);
nv_wr32(priv, 0x100810, 0x00000022);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cc79c796afee..41241922263f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -241,15 +241,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (unlikely(!abi16))
return -ENOMEM;
- client = nv_client(abi16->client);
- if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return nouveau_abi16_put(abi16, -EINVAL);
+ if (!drm->channel)
+ return nouveau_abi16_put(abi16, -ENODEV);
+ client = nv_client(abi16->client);
device = nv_device(abi16->device);
imem = nouveau_instmem(device);
pfb = nouveau_fb(device);
+ /* hack to allow channel engine type specification on kepler */
+ if (device->card_type >= NV_E0) {
+ if (init->fb_ctxdma_handle != ~0)
+ init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ else
+ init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+ /* allow flips to be executed if this is a graphics channel */
+ init->tt_ctxdma_handle = 0;
+ if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+ init->tt_ctxdma_handle = 1;
+ }
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return nouveau_abi16_put(abi16, -EINVAL);
+
/* allocate "abi16 channel" data and make up a handle for it */
init->channel = ffsll(~abi16->handles);
if (!init->channel--)
@@ -264,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
abi16->handles |= (1 << init->channel);
/* create channel object and initialise dma and fence management */
- if (device->card_type >= NV_E0) {
- init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
- init->tt_ctxdma_handle = 0;
- }
-
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
init->channel, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
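
Given the hack above, the userspace contract on Kepler becomes: pass real
ctxdma handles to get a graphics channel (legacy behaviour), or pass
fb_ctxdma_handle = ~0 with the engine selector in tt_ctxdma_handle. A hedged
sketch of the latter; the struct and ioctl are the existing nouveau ABI, while
the engine constant mirrors the kernel-internal NVE0_CHANNEL_IND_ENGINE_GR
value and should be treated as illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>	/* assumed uapi header location */

static int alloc_kepler_gr_channel(int fd)
{
	struct drm_nouveau_channel_alloc req = {
		.fb_ctxdma_handle = ~0U,	/* "engine in tt handle" mode */
		.tt_ctxdma_handle = 0x00000001,	/* illustrative: GR engine */
	};
	return ioctl(fd, DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC, &req);
}
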
@@ -378,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret;
if (unlikely(!abi16))
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114c..d97f20069d3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
acpi_handle rom_handle;
} nouveau_dsm_priv;
+bool nouveau_is_optimus(void) {
+ return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void) {
+ return nouveau_dsm_priv.dsm_detected;
+}
+
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
- /* perhaps the _DSM functions are mutually exclusive, but prepare for
- * the future */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
/* Optimus laptops have the card already disabled in
* nouveau_switcheroo_set_state */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
has_optimus = 1;
}
- if (vga_count == 2 && has_dsm && guid_valid) {
+ /* find the optimus DSM or the old v1 DSM */
+ if (has_optimus == 1) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.dsm_detected = true;
+ nouveau_dsm_priv.optimus_detected = true;
ret = true;
- }
-
- if (has_optimus == 1) {
+ } else if (vga_count == 2 && has_dsm && guid_valid) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.optimus_detected = true;
+ nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
+
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b57..d0da230d7706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
+static inline bool nouveau_is_optimus(void) { return false; }
+static inline bool nouveau_is_v1_dsm(void) { return false; }
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef235882..865eddfa30a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
return 0;
}
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
- struct dcb_output *dcbent, int crtc)
-{
- /*
- * The display script table is located by the BIT 'U' table.
- *
- * It contains an array of pointers to various tables describing
- * a particular output type. The first 32-bits of the output
- * tables contains similar information to a DCB entry, and is
- * used to decide whether that particular table is suitable for
- * the output you want to access.
- *
- * The "record header length" field here seems to indicate the
- * offset of the first configuration entry in the output tables.
- * This is 10 on most cards I've seen, but 12 has been witnessed
- * on DP cards, and there's another script pointer within the
- * header.
- *
- * offset + 0 ( 8 bits): version
- * offset + 1 ( 8 bits): header length
- * offset + 2 ( 8 bits): record length
- * offset + 3 ( 8 bits): number of records
- * offset + 4 ( 8 bits): record header length
- * offset + 5 (16 bits): pointer to first output script table
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- uint8_t *table = &bios->data[bios->display.script_table_ptr];
- uint8_t *otable = NULL;
- uint16_t script;
- int i;
-
- if (!bios->display.script_table_ptr) {
- NV_ERROR(drm, "No pointer to output script table\n");
- return 1;
- }
-
- /*
- * Nothing useful has been in any of the pre-2.0 tables I've seen,
- * so until they are, we really don't need to care.
- */
- if (table[0] < 0x20)
- return 1;
-
- if (table[0] != 0x20 && table[0] != 0x21) {
- NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
- table[0]);
- return 1;
- }
-
- /*
- * The output script tables describing a particular output type
- * look as follows:
- *
- * offset + 0 (32 bits): output this table matches (hash of DCB)
- * offset + 4 ( 8 bits): unknown
- * offset + 5 ( 8 bits): number of configurations
- * offset + 6 (16 bits): pointer to some script
- * offset + 8 (16 bits): pointer to some script
- *
- * headerlen == 10
- * offset + 10 : configuration 0
- *
- * headerlen == 12
- * offset + 10 : pointer to some script
- * offset + 12 : configuration 0
- *
- * Each config entry is as follows:
- *
- * offset + 0 (16 bits): unknown, assumed to be a match value
- * offset + 2 (16 bits): pointer to script table (clock set?)
- * offset + 4 (16 bits): pointer to script table (reset?)
- *
- * There doesn't appear to be a count value to say how many
- * entries exist in each script table, instead, a 0 value in
- * the first 16-bit word seems to indicate both the end of the
- * list and the default entry. The second 16-bit word in the
- * script tables is a pointer to the script to execute.
- */
-
- NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
- dcbent->type, dcbent->location, dcbent->or);
- for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
- break;
- }
-
- if (!otable) {
- NV_DEBUG(drm, "failed to match any output table\n");
- return 1;
- }
-
- if (pclk < -2 || pclk > 0) {
- /* Try to find matching script table entry */
- for (i = 0; i < otable[5]; i++) {
- if (ROM16(otable[table[4] + i*6]) == type)
- break;
- }
-
- if (i == otable[5]) {
- NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
- "using first\n",
- type, dcbent->type, dcbent->or);
- i = 0;
- }
- }
-
- if (pclk == 0) {
- script = ROM16(otable[6]);
- if (!script) {
- NV_DEBUG(drm, "output script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -1) {
- script = ROM16(otable[8]);
- if (!script) {
- NV_DEBUG(drm, "output script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -2) {
- if (table[4] >= 12)
- script = ROM16(otable[10]);
- else
- script = 0;
- if (!script) {
- NV_DEBUG(drm, "output script 2 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk > 0) {
- script = ROM16(otable[table[4] + i*6 + 2]);
- if (script)
- script = clkcmptable(bios, script, pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk < 0) {
- script = ROM16(otable[table[4] + i*6 + 4]);
- if (script)
- script = clkcmptable(bios, script, -pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- }
-
- return 0;
-}
-
-
int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
{
/*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
}
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
- struct bit_entry *bitentry)
-{
- /*
- * Parses the pointer to the G80 output script tables
- *
- * Starting at bitentry->offset:
- *
- * offset + 0 (16 bits): output script table pointer
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- uint16_t outputscripttableptr;
-
- if (bitentry->length != 3) {
- NV_ERROR(drm, "Do not understand BIT U table\n");
- return -EINVAL;
- }
-
- outputscripttableptr = ROM16(bios->data[bitentry->offset]);
- bios->display.script_table_ptr = outputscripttableptr;
- return 0;
-}
-
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
- parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
return 0;
}
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- int i, ret = 0;
+ int ret = 0;
/* Reset the BIOS head to 0. */
bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
bios->fp.lvds_init_run = false;
}
- if (nv_device(drm->device)->card_type >= NV_50) {
- for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
- nouveau_bios_run_display_table(dev, 0, 0,
- &bios->dcb.entry[i], -1);
- }
- }
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a56..f68c54ca422f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -128,12 +128,6 @@ struct nvbios {
} state;
struct {
- struct dcb_output *output;
- int crtc;
- uint16_t script_table_ptr;
- } display;
-
- struct {
uint16_t fptablepointer; /* also used by tmds */
uint16_t fpxlatetableptr;
int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
int nouveau_run_vbios_init(struct drm_device *);
struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
- struct dcb_output *, int crtc);
bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
int head, int pxclk);
int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4c950b4cf416..5614c89148cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ interruptible, no_wait_gpu);
if (ret)
return ret;
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
int ret;
@@ -567,7 +566,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
nouveau_fence_unref(&fence);
return ret;
}
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_reserve,
no_wait_gpu, new_mem);
}
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1098,8 +1094,7 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* CPU copy if we have no accelerated method available */
if (!drm->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
goto out;
}
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
if (nv_device(drm->device)->card_type < NV_50) {
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, true, false);
+ return nouveau_bo_validate(nvbo, false, false);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu);
+ bool no_wait_gpu);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9c..174300b6a02e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
+ if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+ nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
}
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_fb *pfb = nouveau_fb(device);
struct nouveau_software_chan *swch;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret, i;
/* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
/* allocate software object class (used for fences on <= nv05, and
* to signal flip completion), bind it to a subchannel.
*/
- if (chan != chan->drm->cechan) {
+ if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
ret = nouveau_object_new(nv_object(client), chan->handle,
NvSw, nouveau_abi16_swclass(chan->drm),
NULL, 0, &object);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9a6e2cb282dc..ac340ba32017 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev = nv_connector->base.dev;
drm = nouveau_drm(dev);
gpio = nouveau_gpio(drm->device);
- NV_DEBUG(drm, "\n");
if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -221,7 +220,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
DRM_MODE_SUBCONNECTOR_DVID :
@@ -355,7 +354,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
* valid - it's not (rh#613284)
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
- if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+ if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
status = connector_status_connected;
goto out;
}
@@ -929,8 +928,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
int type, ret = 0;
bool dummy;
- NV_DEBUG(drm, "\n");
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
nv_connector = nouveau_connector(connector);
if (nv_connector->index == index)
@@ -1043,7 +1040,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
/* Init DVI-I specific properties */
if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
/* Add overscan compensation options to digital outputs */
if (disp->underscan_property &&
@@ -1051,31 +1048,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
type == DRM_MODE_CONNECTOR_DVII ||
type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_hborder_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_vborder_property,
0);
}
/* Add hue and saturation options */
if (disp->vibrant_hue_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->vibrant_hue_property,
90);
if (disp->color_vibrance_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->color_vibrance_property,
150);
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (nv_device(drm->device)->card_type >= NV_50) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
@@ -1088,18 +1085,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
if (disp->dithering_mode) {
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->dithering_mode);
}
if (disp->dithering_depth) {
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->dithering_depth);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8f..20eb84cce9e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
#define __NOUVEAU_CONNECTOR_H__
#include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
struct nouveau_i2c_port;
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb0133..d1e5890784d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
return &crtc->base;
}
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width,
- uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4f..e4188f24fc75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
nv_fb->r_dma = NvEvoVRAM_LP;
switch (fb->depth) {
- case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
- case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
- case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
case 24:
- case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
- case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
default:
NV_ERROR(drm, "unknown depth %d\n", fb->depth);
return -EINVAL;
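
The raw codes above are the values of the NV50_EVO_CRTC_FB_DEPTH_* macros they
replace; restated as a standalone helper for reference:

static unsigned evo_fb_format(int depth)
{
	switch (depth) {
	case 8:  return 0x1e00;	/* was NV50_EVO_CRTC_FB_DEPTH_8 */
	case 15: return 0xe900;	/* ..._15 */
	case 16: return 0xe800;	/* ..._16 */
	case 24:
	case 32: return 0xcf00;	/* ..._24 */
	case 30: return 0xd100;	/* ..._30 */
	default: return 0;	/* unknown depth */
	}
}
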
@@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev)
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen == 1) {
+ if (gen >= 1) {
disp->vibrant_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"vibrant hue", 2);
@@ -366,10 +366,7 @@ nouveau_display_create(struct drm_device *dev)
if (nv_device(drm->device)->card_type < NV_50)
ret = nv04_display_create(dev);
else
- if (nv_device(drm->device)->card_type < NV_D0)
ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
if (ret)
goto disp_create_err;
@@ -400,11 +397,12 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
if (disp->dtor)
disp->dtor(dev);
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
nouveau_drm(dev)->display = NULL;
kfree(disp);
}
@@ -659,10 +657,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
- if (nv_device(drm->device)->card_type >= NV_D0)
- ret = nvd0_display_flip_next(crtc, fb, chan, 0);
- else
- ret = nv50_display_flip_next(crtc, fb, chan);
+ ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret) {
mutex_unlock(&chan->cli->mutex);
goto fail_unreserve;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a1..59838651ee8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <core/class.h>
+
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry d;
- u8 *table;
- int i;
-
- if (bit_table(dev, 'd', &d)) {
- NV_ERROR(drm, "BIT 'd' table not found\n");
- return NULL;
- }
-
- if (d.version != 1) {
- NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
- return NULL;
- }
-
- table = ROMPTR(dev, d.data[0]);
- if (!table) {
- NV_ERROR(drm, "displayport table pointer invalid\n");
- return NULL;
- }
-
- switch (table[0]) {
- case 0x20:
- case 0x21:
- case 0x30:
- case 0x40:
- break;
- default:
- NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
- return NULL;
- }
-
- for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
- return table;
- }
-
- NV_ERROR(drm, "displayport encoder table not found\n");
- return NULL;
-}
-
/******************************************************************************
* link training
*****************************************************************************/
struct dp_state {
struct nouveau_i2c_port *auxch;
- struct dp_train_func *func;
+ struct nouveau_object *core;
struct dcb_output *dcb;
int crtc;
u8 *dpcd;
@@ -97,13 +54,20 @@ static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink[2];
+ u32 data;
NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
/* set desired link configuration on the source */
- dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
- dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+ if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
/* inform the sink of the new configuration */
sink[0] = dp->link_bw / 27000;
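
A worked example of the method-offset packing introduced above, with
illustrative values: moff = (crtc << 3) | (link << 2) | or, and the LNKCTL
payload encodes the bandwidth divisor in bits 8 and up with the lane count in
the low bits.

#include <stdio.h>

int main(void)
{
	const unsigned crtc = 1, link = 0, or = 2;
	const unsigned moff = (crtc << 3) | (link << 2) | or;	/* 0x0a */
	const unsigned link_bw = 270000, link_nr = 4;	/* 2.7Gbps x4 */
	const unsigned data = ((link_bw / 27000) << 8) | link_nr; /* 0x0a04 */

	/* the same divisor (10 == 0x0a here) is what gets written to the
	 * sink's DP_LINK_BW_SET register via sink[0] above */
	printf("moff 0x%02x, lnkctl data 0x%04x\n", moff, data);
	return 0;
}
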
@@ -118,11 +82,14 @@ static void
dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink_tp;
NV_DEBUG(drm, "training pattern %d\n", pattern);
- dp->func->train_set(dev, dp->dcb, pattern);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
int i;
for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
- dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
}
return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
}
static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30) {
- if (enable) script = ROM16(entry[12]);
- else script = ROM16(entry[14]);
- } else
- if (table[0] == 0x40) {
- if (enable) script = ROM16(entry[11]);
- else script = ROM16(entry[13]);
- }
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[6]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[5]);
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+ NV94_DISP_SOR_DP_TRAIN_OP_INIT);
}
static void
dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[8]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[7]);
- }
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+ NV94_DISP_SOR_DP_TRAIN_OP_FINI);
}
static bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
if (!dp.auxch)
return false;
- dp.func = func;
+ dp.core = core;
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
*/
gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
- /* enable down-spreading, if possible */
- dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* execute pre-train script from vbios */
- dp_link_train_init(dev, &dp);
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
/* start off at highest link rate supported by encoder and display */
while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
void
nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nv_wraux(auxch, DP_SET_POWER, &status, 1);
if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, func);
+ nouveau_dp_link_train(encoder, datarate, core);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8244863cc049..01c403ddb99b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
-#include "nouveau_ttm.h"
-
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
@@ -129,7 +127,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+ else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
else ret = nvc0_fence_create(drm);
if (ret) {
@@ -148,7 +147,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
- arg1 = 0;
+ arg1 = 1;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;
@@ -395,17 +394,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
}
int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- pm_state.event == PM_EVENT_PRETHAW)
- return 0;
-
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending fbcon...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +430,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
goto fail_client;
nouveau_agp_fini(drm);
-
- pci_save_state(pdev);
- if (pm_state.event == PM_EVENT_SUSPEND) {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
return 0;
fail_client:
@@ -457,24 +444,33 @@ fail_client:
return ret;
}
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_cli *cli;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(drm, "re-enabling device...\n");
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
+ ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
- pci_set_master(pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli;
+
+ NV_INFO(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
@@ -500,6 +496,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
return 0;
}
+int nouveau_pmops_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
@@ -652,14 +684,22 @@ nouveau_drm_pci_table[] = {
{}
};
+static const struct dev_pm_ops nouveau_pm_ops = {
+ .suspend = nouveau_pmops_suspend,
+ .resume = nouveau_pmops_resume,
+ .freeze = nouveau_pmops_freeze,
+ .thaw = nouveau_pmops_thaw,
+ .poweroff = nouveau_pmops_freeze,
+ .restore = nouveau_pmops_resume,
+};
+
static struct pci_driver
nouveau_drm_pci_driver = {
.name = "nouveau",
.id_table = nouveau_drm_pci_table,
.probe = nouveau_drm_probe,
.remove = nouveau_drm_remove,
- .suspend = nouveau_drm_suspend,
- .resume = nouveau_drm_resume,
+ .driver.pm = &nouveau_pm_ops,
};
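
The legacy pci_driver .suspend/.resume pair is replaced here by generic
dev_pm_ops, which also gives nouveau distinct hibernation callbacks: freeze
and thaw call the bare do_suspend/do_resume helpers and skip the PCI
power-state dance that the suspend/resume pair performs. A minimal sketch of
the same pattern for a hypothetical driver, to show the shape of the
conversion (names are illustrative):

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce hardware, save state */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* restore state, re-initialise hardware */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend  = demo_suspend,	/* suspend-to-RAM */
	.resume   = demo_resume,
	.freeze   = demo_suspend,	/* hibernation: before snapshot */
	.thaw     = demo_resume,	/* hibernation: after snapshot */
	.poweroff = demo_suspend,	/* hibernation: final power-down */
	.restore  = demo_resume,	/* boot from hibernation image */
};

static struct pci_driver demo_pci_driver = {
	.name      = "demo",
	.driver.pm = &demo_pm_ops,
};
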
static int __init
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a10169927086..aa89eb938b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
return nv_device(nouveau_drm(dev)->device);
}
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a4..d0d95bd511ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
/* nouveau_dp.c */
bool nouveau_dp_detect(struct drm_encoder *);
void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
- struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+ struct nouveau_object *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_validate(nvbo, true, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc889..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- if (nv_device(drm->device)->chipset < 0xa3 ||
- nv_device(drm->device)->chipset == 0xaa ||
- nv_device(drm->device)->chipset == 0xac)
- return false;
- return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- if (!hdmi_sor(encoder))
- return 0x616500 + (nv_crtc->index * 0x800);
- return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
- u32 tmp = hdmi_rd32(encoder, reg);
- hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
- return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- u32 or = nv_encoder->or * 0x800;
-
- if (hdmi_sor(encoder))
- nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_connector *nv_connector;
- u32 or = nv_encoder->or * 0x800;
- int i;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid)) {
- nouveau_audio_disconnect(encoder);
- return;
- }
-
- if (hdmi_sor(encoder)) {
- nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
- nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
- }
- }
-}
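
The ELD loop above streams bytes through a single data-port register (0x61c440 + or): each 32-bit write carries the byte index in bits 15:8 and the data byte in bits 7:0, and the remainder of the 0x60-byte buffer is zero-filled (in the driver the byte count comes from eld[2] * 4, the baseline-ELD size in 4-byte units). A minimal standalone sketch of that access pattern; eld_upload(), wr32() and eld_port are illustrative names, not driver API:

#include <stdint.h>

static uint32_t eld_port;	/* stand-in for the hardware data port */

static void wr32(uint32_t v) { eld_port = v; }

static void eld_upload(const uint8_t *eld, int eld_bytes)
{
	int i;

	/* byte index in bits 15:8, data byte in bits 7:0 */
	for (i = 0; i < eld_bytes; i++)
		wr32(((uint32_t)i << 8) | eld[i]);
	/* zero-fill the rest of the 0x60-byte buffer */
	for (; i < 0x60; i++)
		wr32((uint32_t)i << 8);
}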
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
- /* calculate checksum for the infoframe */
- u8 sum = 0, i;
- for (i = 0; i < frame[2]; i++)
- sum += frame[i];
- frame[3] = 256 - sum;
-
- /* disable infoframe, and write header */
- hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
- hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
- /* register scans tell me the audio infoframe has only one set of
- * subpack regs, according to tegra (gee nvidia, it'd be nice if we
- * could get those docs too!), the hdmi block pads out the rest of
- * the packet on its own.
- */
- if (ctrl == 0x020)
- frame[2] = 6;
-
- /* write out checksum and data, weird weird 7 byte register pairs */
- for (i = 0; i < frame[2] + 1; i += 7) {
- u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
- u32 *subpack = (u32 *)&frame[3 + i];
- hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
- hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
- }
-
- /* enable the infoframe */
- hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
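
The checksum here serves the CEA-861 rule that all infoframe bytes, the checksum included, must sum to zero modulo 256. A standalone sketch of that rule over a full header and payload (a generic illustration with made-up names, not a copy of the driver's loop):

#include <stdint.h>

/* Choose the checksum so that type, version, length, checksum and
 * every payload byte sum to 0 (mod 256). */
static uint8_t infoframe_checksum(const uint8_t hdr[3],
				  const uint8_t *payload, int len)
{
	unsigned int sum = hdr[0] + hdr[1] + hdr[2];
	int i;

	for (i = 0; i < len; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}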
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
- const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
- const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
- u8 frame[20];
-
- frame[0x00] = 0x82; /* AVI infoframe */
- frame[0x01] = 0x02; /* version */
- frame[0x02] = 0x0d; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
- frame[0x05] = (C << 6) | (M << 4) | R;
- frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
- frame[0x07] = VIC;
- frame[0x08] = PR;
- frame[0x09] = bar_top & 0xff;
- frame[0x0a] = bar_top >> 8;
- frame[0x0b] = bar_bottom & 0xff;
- frame[0x0c] = bar_bottom >> 8;
- frame[0x0d] = bar_left & 0xff;
- frame[0x0e] = bar_left >> 8;
- frame[0x0f] = bar_right & 0xff;
- frame[0x10] = bar_right >> 8;
- frame[0x11] = 0x00;
- frame[0x12] = 0x00;
- frame[0x13] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
- const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
- u8 frame[12];
-
- frame[0x00] = 0x84; /* Audio infoframe */
- frame[0x01] = 0x01; /* version */
- frame[0x02] = 0x0a; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (CT << 4) | CC;
- frame[0x05] = (SF << 2) | ceaSS;
- frame[0x06] = FMT;
- frame[0x07] = CA;
- frame[0x08] = (DM_INH << 7) | (LSV << 3);
- frame[0x09] = 0x00;
- frame[0x0a] = 0x00;
- frame[0x0b] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
- nouveau_audio_disconnect(encoder);
-
- /* disable audio and avi infoframes */
- hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
- hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
- /* disable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- u32 max_ac_packet, rekey;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!mode || !nv_connector || !nv_connector->edid ||
- !drm_detect_hdmi_monitor(nv_connector->edid)) {
- nouveau_hdmi_disconnect(encoder);
- return;
- }
-
- nouveau_hdmi_video_infoframe(encoder, mode);
- nouveau_hdmi_audio_infoframe(encoder, mode);
-
- hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
- nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* value matches nvidia binary driver, and tegra constant */
- rekey = 56;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* enable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
- 0x1f000000 | /* unknown */
- max_ac_packet << 16 |
- rekey);
-
- nouveau_audio_mode_set(encoder, mode);
-}
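
As a worked example of the max_ac_packet arithmetic above (mode numbers chosen purely for illustration): a CEA 1080p mode has htotal = 2200 and hdisplay = 1920, so horizontal blanking is 280 pixels; subtracting rekey (56) and the constant 18 leaves 206, and integer division by 32 gives max_ac_packet = 6, the value shifted into bits 16+ of the enable register.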
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28a..1303680affd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
nv_subdev(pmc)->intr(nv_subdev(pmc));
-
- if (dev->mode_config.num_crtc) {
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
- }
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2c..3543fec2355e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
return ret;
nvbo = *pnvbo;
- /* we restrict allowed domains on nv50+ to only the types
- * that were requested at creation time. this is not possible on
- * earlier chips without busting the ABI.
- */
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873df..25d3495725eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- nouveau_drm_resume(pdev);
+ nouveau_pmops_resume(&pdev->dev);
drm_kms_helper_poll_enable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_switcheroo_optimus_dsm();
- nouveau_drm_suspend(pdev, pmm);
+ nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda3..6578cd28c556 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc);
}
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
if (ret)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c23..2cd6fb8c548e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
struct nv04_display *disp;
int i, ret;
- NV_DEBUG(drm, "\n");
-
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
void
nv04_display_destroy(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_display *disp = nv04_display(dev);
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NV_DEBUG(drm, "\n");
-
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4e..7ae7f97a6d4d 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,6 +155,8 @@ nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
@@ -183,8 +185,11 @@ nv10_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2d..2ca276ada507 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
break;
}
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
tv_enc->select_subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_mode_property,
tv_enc->tv_norm);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
tv_enc->flicker);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_saturation_property,
tv_enc->saturation);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_hue_property,
tv_enc->hue);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_overscan_property,
tv_enc->overscan);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d6269..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- NV_DEBUG(drm, "\n");
-
- for (i = 0; i < 256; i++) {
- writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
- }
-
- if (nv_crtc->lut.depth == 30) {
- writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
- }
-}
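
The loadable LUT format implied above has an 8-byte stride per entry: red at +0, green at +2, blue at +4, each a 14-bit value (hence the >> 2 from the 16-bit gamma ramp), plus a duplicated 257th entry for 30-bit scanout. A minimal sketch of the same packing into a plain buffer; lut_pack() and its arguments are illustrative, not driver API:

#include <stdint.h>
#include <string.h>

static void lut_pack(uint8_t *lut,	/* at least 257 * 8 bytes */
		     const uint16_t *r, const uint16_t *g,
		     const uint16_t *b, int depth30)
{
	int i;

	for (i = 0; i < 256; i++) {
		/* r, g, b as 14-bit words; 2 bytes of padding per entry */
		uint16_t e[3] = { r[i] >> 2, g[i] >> 2, b[i] >> 2 };
		memcpy(lut + 8 * i, e, sizeof(e));
	}
	if (depth30)	/* 30bpp scanout also reads entry 256 */
		memcpy(lut + 8 * 256, lut + 8 * 255, 6);
}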
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int index = nv_crtc->index, ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
- NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
- if (blanked) {
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
- if (ret) {
- NV_ERROR(drm, "no space while blanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- } else {
- if (nv_crtc->cursor.visible)
- nv_crtc->cursor.show(nv_crtc, false);
- else
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
- if (ret) {
- NV_ERROR(drm, "no space while unblanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, nv_crtc->lut.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF :
- NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- if (nv_device(drm->device)->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- }
-
- nv_crtc->fb.blanked = blanked;
- return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- int head = nv_crtc->index, ret;
- u32 mode = 0x00;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
- OUT_RING (evo, mode);
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
- }
-
- return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
- int adj;
- u32 hue, vib;
-
- NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
- nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(drm, "no space while setting color vibrance\n");
- return ret;
- }
-
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING (evo, (hue << 20) | (vib << 8));
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
-
- return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_connector *connector;
- struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
- /* The safest approach is to find an encoder with the right crtc that
- * is also linked to a connector. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
- }
-
- return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_connector *nv_connector;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *umode = &crtc->mode;
- struct drm_display_mode *omode;
- int scaling_mode, ret;
- u32 ctrl = 0, oX, oY;
-
- NV_DEBUG(drm, "\n");
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(drm, "no native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
- } else {
- scaling_mode = nv_connector->scaling_mode;
- }
-
- /* start off at the resolution we programmed the crtc for; this
- * effectively handles NONE/FULL scaling
- */
- if (scaling_mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary; this will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- if (umode->hdisplay != oX || umode->vdisplay != oY ||
- umode->flags & DRM_MODE_FLAG_INTERLACE ||
- umode->flags & DRM_MODE_FLAG_DBLSCAN)
- ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
- ret = RING_SPACE(evo, 5);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- OUT_RING (evo, ctrl);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING (evo, oY << 16 | oX);
- OUT_RING (evo, oY << 16 | oX);
-
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
- }
-
- return 0;
-}
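
The underscan and aspect math above is 0.19 fixed point: aspect = (oY << 19) / oX, and dimensions map back through (v * aspect + aspect / 2) >> 19. As a purely illustrative example, a 1920x1080 output gives aspect = (1080 << 19) / 1920 = 294912 (0.5625), so a border-reduced width of 1800 maps to (1800 * 294912 + 147456) >> 19 = 1012 for the matching height.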
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_clock *clk = nouveau_clock(device);
-
- return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
- NV_DEBUG(drm, "\n");
-
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- drm_crtc_cleanup(&nv_crtc->base);
- kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_bo *cursor = NULL;
- struct drm_gem_object *gem;
- int ret = 0, i;
-
- if (!buffer_handle) {
- nv_crtc->cursor.hide(nv_crtc, true);
- return 0;
- }
-
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
- if (!gem)
- return -ENOENT;
- cursor = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(cursor);
- if (ret)
- goto out;
-
- /* The simple approach will do for now. */
- for (i = 0; i < 64 * 64; i++)
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
- nouveau_bo_unmap(cursor);
-
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
- nv_crtc->cursor.show(nv_crtc, true);
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nv_crtc->cursor.set_pos(nv_crtc, x, y);
- return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- int end = (start + size > 256) ? 256 : start + size, i;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- /* We need to know the depth before we upload, but it's possible to
- * get called before a framebuffer is bound. If this is the case,
- * mark the lut values as dirty by setting depth==0, and it'll be
- * uploaded on the first mode_set_base()
- */
- if (!nv_crtc->base.fb) {
- nv_crtc->lut.depth = 0;
- return;
- }
-
- nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
- .save = nv50_crtc_save,
- .restore = nv50_crtc_restore,
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = nouveau_crtc_page_flip,
- .destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_display_flip_stop(crtc);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
- nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_crtc_blank(nv_crtc, false);
- drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *passed_fb,
- int x, int y, bool atomic)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
- int ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- /* no fb bound */
- if (!atomic && !crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- /* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
- */
- if (atomic) {
- drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
- } else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
- }
-
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
- nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
- nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
- if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- OUT_RING (evo, fb->r_dma);
- }
-
- ret = RING_SPACE(evo, 12);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING (evo, nv_crtc->fb.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
- OUT_RING (evo, fb->r_pitch);
- OUT_RING (evo, fb->r_format);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING (evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING (evo, (y << 16) | x);
-
- if (nv_crtc->lut.depth != fb->base.depth) {
- nv_crtc->lut.depth = fb->base.depth;
- nv50_crtc_lut_load(crtc);
- }
-
- return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 head = nv_crtc->index * 0x400;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- int ret;
-
- /* hw timing description looks like this:
- *
- * <sync> <back porch> <---------display---------> <front porch>
- * ______
- *       |____________|---------------------------|____________|
- *
- * ^ synce       ^ blanke                  ^ blanks            ^ active
- *
- * interlaced modes also have 2 additional values pointing at the end
- * and start of the next field's blanking period.
- */
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = RING_SPACE(evo, 18);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0804 + head, 2);
- OUT_RING (evo, 0x00800000 | mode->clock);
- OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_NV04(evo, 0, 0x0810 + head, 6);
- OUT_RING (evo, 0x00000000); /* border colour */
- OUT_RING (evo, (vactive << 16) | hactive);
- OUT_RING (evo, ( vsynce << 16) | hsynce);
- OUT_RING (evo, (vblanke << 16) | hblanke);
- OUT_RING (evo, (vblanks << 16) | hblanks);
- OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_NV04(evo, 0, 0x082c + head, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0900 + head, 1);
- OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
- OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
- OUT_RING (evo, 0x00000000); /* screen position */
- }
-
- nv_crtc->set_dither(nv_crtc, false);
- nv_crtc->set_scale(nv_crtc, false);
- nv_crtc->set_color_vibrance(nv_crtc, false);
-
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
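
To make the timing formulas above concrete, take the standard CEA 1080p60 mode (hdisplay/hsync_start/hsync_end/htotal = 1920/2008/2052/2200, vdisplay/vsync_start/vsync_end/vtotal = 1080/1084/1089/1125, progressive, so ilace = vscan = 1): they yield hsynce = 43, hbackp = 148, hblanke = 191, hfrontp = 88, hblanks = 2111, vsynce = 4, vbackp = 36, vblanke = 40, vfrontp = 4 and vblanks = 1120; the blanke/blanks values are counted from the start of sync, as in the diagram.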
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
- if (ret)
- return ret;
-
- ret = nv50_display_sync(crtc->dev);
- if (ret)
- return ret;
-
- return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
- if (ret)
- return ret;
-
- return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = NULL;
- int ret, i;
-
- NV_DEBUG(drm, "\n");
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
- nv_crtc->color_vibrance = 50;
- nv_crtc->vibrant_hue = 0;
- nv_crtc->lut.depth = 0;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
- ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- nv50_cursor_init(nv_crtc);
-out:
- if (ret)
- nv50_crtc_destroy(&nv_crtc->base);
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113ceee..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while unhiding cursor\n");
- return;
- }
-
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
- OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = true;
- }
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && !nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while hiding cursor\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
- }
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = false;
- }
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
- struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
- nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
- ((y & 0xFFFF) << 16) | (x & 0xFFFF));
- /* Needed to make the cursor move. */
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
- if (offset == nv_crtc->cursor.offset)
- return;
-
- nv_crtc->cursor.offset = offset;
- if (nv_crtc->cursor.visible) {
- nv_crtc->cursor.visible = false;
- nv_crtc->cursor.show(nv_crtc, true);
- }
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
- nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
- nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
- nv_crtc->cursor.hide = nv50_cursor_hide;
- nv_crtc->cursor.show = nv50_cursor_show;
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a1748573..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- enum drm_connector_status status = connector_status_disconnected;
- uint32_t dpms_state, load_pattern, load_state;
- int or = nv_encoder->or;
-
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
- dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return status;
- }
-
- /* Use bios provided value if possible. */
- if (drm->vbios.dactestval) {
- load_pattern = drm->vbios.dactestval;
- NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
- load_pattern);
- } else {
- load_pattern = 340;
- NV_DEBUG(drm, "Using default load_pattern of %d\n",
- load_pattern);
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
- NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
- mdelay(45); /* give it some time to process */
- load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
- if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
- NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
- status = connector_status_connected;
-
- if (status == connector_status_connected)
- NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
- else
- NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
- return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return;
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
- if (mode != DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
- switch (mode) {
- case DRM_MODE_DPMS_STANDBY:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- case DRM_MODE_DPMS_OFF:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- default:
- break;
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- uint32_t mode_ctl = 0, mode_ctl2 = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
- /* Lacking a working tv-out, this is not 100% certain. */
- if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
- mode_ctl |= 0x40;
- else
- if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
- mode_ctl |= 0x100;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
- ret = RING_SPACE(evo, 3);
- if (ret) {
- NV_ERROR(drm, "no space while connecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
- OUT_RING(evo, mode_ctl);
- OUT_RING(evo, mode_ctl2);
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
- .dpms = nv50_dac_dpms,
- .save = nv50_dac_save,
- .restore = nv50_dac_restore,
- .mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .get_crtc = nv50_dac_crtc_get,
- .detect = nv50_dac_detect,
- .disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- if (!encoder)
- return;
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
- .destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
-
- drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6bb..35874085a61e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+ /*
+ * Copyright 2011 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
#include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nv50_display.h"
+#include <core/client.h>
#include <core/gpuobj.h>
-#include <subdev/timer.h>
+#include <core/class.h>
-static void nv50_display_bh(unsigned long);
-
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
+ (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+ struct nouveau_object *user;
+ u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+ const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+ int ret;
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0)
- return 2;
+ ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+ oclass, data, size, &chan->user);
+ if (ret)
+ return ret;
- return 4;
+ chan->handle = handle;
+ return 0;
}
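
The handle layout in the macros above can be read off directly: EVO_CHAN_HANDLE(t, i) places the class byte t in bits 15:8 and the head index in the low bits, so a hypothetical t = 0x7d with head 0 gives 0xd15c0000 | (0x7d << 8) | 0 = 0xd15c7d00.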
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 mask = 0;
- int i;
-
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0) {
- for (i = 0; i < 2; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- } else {
- for (i = 0; i < 4; i++)
- mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
- }
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ if (chan->handle)
+ nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
- for (i = 0; i < 3; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
- return mask & 3;
-}
+struct nv50_pioc {
+ struct nv50_chan base;
+};
-int
-nv50_display_early_init(struct drm_device *dev)
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
{
- return 0;
+ nv50_chan_destroy(core, &pioc->base);
}
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_pioc *pioc)
{
+ return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
}
-int
-nv50_display_sync(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- int ret;
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
- ret = RING_SPACE(evo, 6);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
+struct nv50_dmac {
+ struct nv50_chan base;
+ dma_addr_t handle;
+ u32 *ptr;
+};
- nv_wo32(disp->ramin, 0x2000, 0x00000000);
- FIRE_RING (evo);
-
- if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
- return 0;
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+ if (dmac->ptr) {
+ struct pci_dev *pdev = nv_device(core)->pdev;
+ pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
- return 0;
+ nv50_chan_destroy(core, &dmac->base);
}
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_channel *evo;
- int ret, i;
- u32 val;
-
- NV_DEBUG(drm, "\n");
-
- nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
- /*
- * I think the 0x006101XX range is some kind of main control area
- * that enables things.
- */
- /* CRTC? */
- for (i = 0; i < 2; i++) {
- val = nv_rd32(device, 0x00616100 + (i * 0x800));
- nv_wr32(device, 0x00610190 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616104 + (i * 0x800));
- nv_wr32(device, 0x00610194 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616108 + (i * 0x800));
- nv_wr32(device, 0x00610198 + (i * 0x10), val);
- val = nv_rd32(device, 0x0061610c + (i * 0x800));
- nv_wr32(device, 0x0061019c + (i * 0x10), val);
- }
-
- /* DAC */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061a000 + (i * 0x800));
- nv_wr32(device, 0x006101d0 + (i * 0x04), val);
- }
-
- /* SOR */
- for (i = 0; i < nv50_sor_nr(dev); i++) {
- val = nv_rd32(device, 0x0061c000 + (i * 0x800));
- nv_wr32(device, 0x006101e0 + (i * 0x04), val);
- }
-
- /* EXT */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061e000 + (i * 0x800));
- nv_wr32(device, 0x006101f0 + (i * 0x04), val);
- }
-
- for (i = 0; i < 3; i++) {
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
- }
-
- /* The precise purpose is unknown; I suspect it has something to do
- * with text mode.
- */
- if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
- nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
- nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
- if (!nv_wait(device, 0x006194e8, 2, 0)) {
- NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
- NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
-
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
- NV_ERROR(drm, "timeout: "
- "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
- NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
- }
-
- nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
- ret = nv50_evo_init(dev);
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- evo = nv50_display(dev)->master;
-
- nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
- ret = RING_SPACE(evo, 3);
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING (evo, NvEvoSync);
- return nv50_display_sync(dev);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
}
-void
-nv50_display_fini(struct drm_device *dev)
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- struct drm_crtc *drm_crtc;
- int ret, i;
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- NV_DEBUG(drm, "\n");
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- nv50_crtc_blank(crtc, true);
- }
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- ret = RING_SPACE(evo, 2);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- }
- FIRE_RING(evo);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
- * cleaning up?
- */
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
- uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
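+/* bring up a DMA channel: allocate the push buffer page, expose it to the
+ * display core as a PCI DMA object, create the channel itself, then attach
+ * the sync-buffer and VRAM DMA objects, plus the per-generation framebuffer
+ * DMA objects every caller relies on.
+ */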
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, u64 syncbuf,
+ struct nv50_dmac *dmac)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ u32 pushbuf = *(u32 *)data;
+ int ret;
- if (!crtc->base.enabled)
- continue;
+ dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+ &dmac->handle);
+ if (!dmac->ptr)
+ return -ENOMEM;
- nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
- if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
- NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
- "0x%08x\n", mask, mask);
- NV_ERROR(drm, "0x610024 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_INTR_1));
- }
- }
+ ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+ NV_DMA_FROM_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_PCI_US |
+ NV_DMA_ACCESS_RD,
+ .start = dmac->handle + 0x0000,
+ .limit = dmac->handle + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- }
- }
+ ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+ if (ret)
+ return ret;
- nv50_evo_fini(dev);
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = syncbuf + 0x0000,
+ .limit = syncbuf + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 3; i++) {
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
- }
- }
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- /* disable interrupts. */
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+ if (nv_device(core)->card_type < NV_C0)
+ ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ if (nv_device(core)->card_type < NV_D0)
+ ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+ return ret;
}
-int
-nv50_display_create(struct drm_device *dev)
+struct nv50_mast {
+ struct nv50_dmac base;
+};
+
+struct nv50_curs {
+ struct nv50_pioc base;
+};
+
+struct nv50_sync {
+ struct nv50_dmac base;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_ovly {
+ struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+ struct nv50_pioc base;
+};
+
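+/* each hardware head owns a set of auxiliary EVO channels: cursor and
+ * overlay-immediate PIO channels, plus sync (page flip) and overlay DMA
+ * channels.
+ */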
+struct nv50_head {
+ struct nouveau_crtc base;
+ struct nv50_curs curs;
+ struct nv50_sync sync;
+ struct nv50_ovly ovly;
+ struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+ struct nouveau_object *core;
+ struct nv50_mast mast;
+
+ u32 modeset;
+
+ struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *ct;
- struct nv50_display *priv;
- int ret, i;
+ return nouveau_display(dev)->priv;
+}
- NV_DEBUG(drm, "\n");
+#define nv50_mast(d) (&nv50_disp(d)->mast)
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = priv;
- nouveau_display(dev)->dtor = nv50_display_destroy;
- nouveau_display(dev)->init = nv50_display_init;
- nouveau_display(dev)->fini = nv50_display_fini;
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
- /* Create CRTC objects */
- for (i = 0; i < 2; i++) {
- ret = nv50_crtc_create(dev, i);
- if (ret)
- return ret;
- }
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
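+/* evo_wait() returns a pointer into the push buffer with room for 'nr'
+ * words; when the page is nearly full it emits what appears to be a
+ * jump-to-start command (0x20000000), resets PUT and waits for the
+ * display engine's GET to follow.  evo_kick() publishes the new PUT
+ * offset through the channel's user registers.
+ */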
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+ struct nv50_dmac *dmac = evoc;
+ u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
- /* We setup the encoders from the BIOS table */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_output *entry = &dcb->entry[i];
+ if (put + nr >= (PAGE_SIZE / 4) - 8) {
+ dmac->ptr[put] = 0x20000000;
- if (entry->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
- entry->type, ffs(entry->or) - 1);
- continue;
+ nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+ if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ nv_error(dmac->base.user, "channel stalled\n");
+ return NULL;
}
- connector = nouveau_connector_create(dev, entry->connector);
- if (IS_ERR(connector))
- continue;
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, entry);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, entry);
- break;
- default:
- NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
- continue;
- }
+ put = 0;
}
- list_for_each_entry_safe(connector, ct,
- &dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
- }
+ return dmac->ptr + put;
+}
- tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+static void
+evo_kick(u32 *push, void *evoc)
+{
+ struct nv50_dmac *dmac = evoc;
+ nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
- ret = nv50_evo_create(dev);
- if (ret) {
- nv50_display_destroy(dev);
- return ret;
- }
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d) *((p)++) = (d)
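+/* each method header word encodes the data-word count above bit 18 and
+ * the method offset in the low bits; the payload words follow.
+ */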
- return 0;
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
}
-void
-nv50_display_destroy(struct drm_device *dev)
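+/* queue a notifier request on the master channel and spin until the
+ * notify dword in the shared sync buffer flips, ensuring the core
+ * channel has processed everything submitted so far.
+ */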
+static int
+evo_sync(struct drm_device *dev)
{
- struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_device *device = nouveau_dev(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_mast *mast = nv50_mast(dev);
+ u32 *push = evo_wait(mast, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+ return 0;
+ }
- nv50_evo_destroy(dev);
- kfree(disp);
+ return -EBUSY;
}
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
struct nouveau_bo *
nv50_display_crtc_sema(struct drm_device *dev, int crtc)
{
- return nv50_display(dev)->crtc[crtc].sem.bo;
+ return nv50_disp(dev)->sync;
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nv50_display *disp = nv50_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
- int ret;
-
- ret = RING_SPACE(evo, 8);
- if (ret) {
- WARN_ON(1);
- return;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
+
+ push = evo_wait(sync, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
}
-
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0094, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan)
+ struct nouveau_channel *chan, u32 swap_interval)
{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
int ret;
- ret = RING_SPACE(evo, chan ? 25 : 27);
- if (unlikely(ret))
- return ret;
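+ /* the swap interval is carried in bits 7:4 of the 0x0084 flip
+ * method; an interval of zero appears to request an immediate,
+ * unsynchronised flip via flag 0x100 instead.
+ */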
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(sync, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
ret = RING_SPACE(chan, 10);
- if (ret) {
- WIND_RING(evo);
+ if (ret)
return ret;
- }
- if (nv_device(drm->device)->chipset < 0xc0) {
- BEGIN_NV04(chan, 0, 0x0060, 2);
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, dispc->sem.offset);
- BEGIN_NV04(chan, 0, 0x006c, 1);
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_NV04(chan, 0, 0x0064, 2);
- OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, sync->sem.offset);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_NV04(chan, 0, 0x0060, 1);
- if (nv_device(drm->device)->chipset < 0x84)
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram);
} else {
u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ offset += sync->sem.offset;
+
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
OUT_RING (chan, 0x1001);
}
+
FIRE_RING (chan);
} else {
- nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
- 0xf00d0000 | dispc->sem.value);
+ nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+ 0xf00d0000 | sync->sem.value);
+ evo_sync(crtc->dev);
}
- /* queue the flip on the crtc's "display sync" channel */
- BEGIN_NV04(evo, 0, 0x0100, 1);
- OUT_RING (evo, 0xfffe0000);
- if (chan) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000100);
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
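+ /* program the EVO-side semaphore: the flip is held back until
+ * 0xf00d0000|sem.value appears at sem.offset in the NvEvoSync
+ * buffer, written either by the render channel above or directly
+ * by the CPU when no channel is involved.
+ */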
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, sync->sem.offset);
+ evo_data(push, 0xf00d0000 | sync->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
} else {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000010);
- /* allows gamma somehow, PDISP will bitch at you if
- * you don't wait for vblank before changing this..
- */
- BEGIN_NV04(evo, 0, 0x00e0, 1);
- OUT_RING (evo, 0x40000000);
- }
- BEGIN_NV04(evo, 0, 0x0088, 4);
- OUT_RING (evo, dispc->sem.offset);
- OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
- OUT_RING (evo, 0x74b1e000);
- OUT_RING (evo, NvEvoSync);
- BEGIN_NV04(evo, 0, 0x00a0, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, nv_fb->r_dma);
- BEGIN_NV04(evo, 0, 0x0110, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0800, 5);
- OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (fb->height << 16) | fb->width);
- OUT_RING (evo, nv_fb->r_pitch);
- OUT_RING (evo, nv_fb->r_format);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
-
- dispc->sem.offset ^= 0x10;
- dispc->sem.value++;
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ }
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
+
+ sync->sem.offset ^= 0x10;
+ sync->sem.value++;
return 0;
}
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
- u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_connector *nv_connector = NULL;
- struct drm_encoder *encoder;
- struct nvbios *bios = &drm->vbios;
- u32 script = 0, or;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
+
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (nv_encoder->dcb != dcb)
- continue;
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+ evo_data(push, mode);
+ } else
+ if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ } else {
+ evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ }
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- break;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
}
- or = ffs(dcb->or) - 1;
- switch (dcb->type) {
- case DCB_OUTPUT_LVDS:
- script = (mc >> 8) & 0xf;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- script |= 0x0100;
- if (bios->fp.if_is_24bit)
- script |= 0x0200;
- } else {
- /* determine number of lvds links */
- if (nv_connector && nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- /* http://www.spwg.org */
- if (((u8 *)nv_connector->edid)[121] == 2)
- script |= 0x0100;
- } else
- if (pxclk >= bios->fp.duallink_transition_clk) {
- script |= 0x0100;
- }
+ return 0;
+}
- /* determine panel depth */
- if (script & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- script |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- script |= 0x0200;
- }
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct nouveau_connector *nv_connector;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+
+ /* start off at the resolution we programmed the crtc for; this
+ * effectively handles NONE/FULL scaling
+ */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary; this keeps the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
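+ /* aspect is the output Y:X ratio in 13.19 fixed point, used to
+ * rederive oY from the adjusted oX when no vertical border was
+ * requested.
+ */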
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
- if (nv_connector && nv_connector->edid &&
- (nv_connector->edid->revision >= 4) &&
- (nv_connector->edid->input & 0x70) >= 0x20)
- script |= 0x0200;
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DCB_OUTPUT_TMDS:
- script = (mc >> 8) & 0xf;
- if (pxclk >= 165000)
- script |= 0x0100;
- break;
- case DCB_OUTPUT_DP:
- script = (mc >> 8) & 0xf;
- break;
- case DCB_OUTPUT_ANALOG:
- script = 0xff;
- break;
default:
- NV_ERROR(drm, "modeset on unsupported output type!\n");
break;
}
- return script;
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ /*XXX: SCALE_CTRL_ACTIVE??? */
+ evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ } else {
+ evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ }
+
+ evo_kick(push, mast);
+
+ if (update) {
+ nv50_display_flip_stop(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ }
+ }
+
+ return 0;
}
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), mc;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push, hue, vib;
+ int adj;
+
+ adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+ vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+ hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ } else {
+ evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
+ return 0;
+}
- /* Determine which CRTC we're dealing with, only 1 ever will be
- * signalled at the same time with the current nouveau code.
- */
- crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Find which encoder was connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ int x, int y, bool update)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (y << 16) | x);
+ if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->r_dma);
+ }
+ } else {
+ evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_data(push, nvfb->r_dma);
+ evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (y << 16) | x);
+ }
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
}
+ evo_kick(push, mast);
+ }
- or = i;
+ nv_crtc->fb.tile_flags = nvfb->r_dma;
+ return 0;
+}
+
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ }
+ evo_kick(push, mast);
}
+}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+}
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+ if (show)
+ nv50_crtc_cursor_show(nv_crtc);
+ else
+ nv50_crtc_cursor_hide(nv_crtc);
+
+ if (update) {
+ u32 *push = evo_wait(mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ nv50_display_flip_stop(crtc);
+
+ push = evo_wait(mast, 2);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x03000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ push = evo_wait(mast, 32);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM_LP);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, 0x83000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0xffffff00);
}
- or = i;
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ nvfb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(nvfb->nvbo);
}
- /* There was no encoder to disable */
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 *push;
+ int ret;
+
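+ /* translate the DRM mode into the sync/blanking values the EVO
+ * methods expect; vertical values are scaled for doublescan and
+ * interlace, and interlaced modes program a second blanking region
+ * (vblan2s/vblan2e) for the second field.
+ */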
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ }
- /* Disable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
- if (dcb->type == type && (dcb->or & (1 << or))) {
- nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
- disp->irq.dcb = dcb;
- goto ack;
+ push = evo_wait(mast, 64);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00800000 | mode->clock);
+ evo_data(push, (ilace == 2) ? 2 : 0);
+ evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ } else {
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000); /* ??? */
+ evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, mode->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, mode->clock * 1000);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
}
+
+ evo_kick(push, mast);
}
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ nv50_crtc_set_dither(nv_crtc, false);
+ nv50_crtc_set_scale(nv_crtc, false);
+ nv50_crtc_set_color_vibrance(nv_crtc, false);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ if (!crtc->fb) {
+ NV_DEBUG(drm, "No FB bound\n");
+ return 0;
+ }
+
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ return 0;
}
static void
-nv50_display_unk20_handler(struct drm_device *dev)
+nv50_crtc_lut_load(struct drm_crtc *crtc)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
- struct dcb_output *dcb;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- dcb = disp->irq.dcb;
- if (dcb) {
- nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
- disp->irq.dcb = NULL;
- }
-
- /* CRTC clock change requested? */
- crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
- if (crtc >= 0) {
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
- pclk &= 0x003fffff;
- if (pclk)
- nv50_crtc_set_clock(dev, crtc, pclk);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
- tmp &= ~0x000000f;
- nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
- }
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
- /* Find which encoder is connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
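+ /* LUT entry layout differs by display class: pre-NVD0 packs 8-byte
+ * entries, NVD0 and later use a 0x20-byte stride with a +0x6000 bias
+ * that presumably selects the wider output range.
+ */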
+ for (i = 0; i < 256; i++) {
+ u16 r = nv_crtc->lut.r[i] >> 2;
+ u16 g = nv_crtc->lut.g[i] >> 2;
+ u16 b = nv_crtc->lut.b[i] >> 2;
+
+ if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+ writew(r + 0x0000, lut + (i * 0x08) + 0);
+ writew(g + 0x0000, lut + (i * 0x08) + 2);
+ writew(b + 0x0000, lut + (i * 0x08) + 4);
+ } else {
+ writew(r + 0x6000, lut + (i * 0x20) + 0);
+ writew(g + 0x6000, lut + (i * 0x20) + 2);
+ writew(b + 0x6000, lut + (i * 0x20) + 4);
+ }
+ }
+}
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool visible = (handle != 0);
+ int i, ret = 0;
+
+ if (visible) {
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (unlikely(!gem))
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret == 0) {
+ for (i = 0; i < 64 * 64; i++) {
+ u32 v = nouveau_bo_rd32(nvbo, i);
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+ }
+ nouveau_bo_unmap(nvbo);
}
- or = i;
+ drm_gem_object_unreference_unlocked(gem);
}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
+ if (visible != nv_crtc->cursor.visible) {
+ nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+ nv_crtc->cursor.visible = visible;
+ }
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ return ret;
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
- }
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nv50_curs *curs = nv50_curs(crtc);
+ struct nv50_chan *chan = nv50_chan(curs);
+ nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+ nv_wo32(chan->user, 0x0080, 0x00000000);
+ return 0;
+}
- or = i;
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 end = min_t(u32, start + size, 256);
+ u32 i;
+
+ for (i = start; i < end; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
}
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ nv50_crtc_lut_load(crtc);
+}
- /* Enable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)))
- break;
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ nv50_dmac_destroy(disp->core, &head->ovly.base);
+ nv50_pioc_destroy(disp->core, &head->oimm.base);
+ nv50_dmac_destroy(disp->core, &head->sync.base);
+ nv50_pioc_destroy(disp->core, &head->curs.base);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.nvbo)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
+ if (nv_crtc->lut.nvbo)
+ nouveau_bo_unpin(nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+ .dpms = nv50_crtc_dpms,
+ .prepare = nv50_crtc_prepare,
+ .commit = nv50_crtc_commit,
+ .mode_fixup = nv50_crtc_mode_fixup,
+ .mode_set = nv50_crtc_mode_set,
+ .mode_set_base = nv50_crtc_mode_set_base,
+ .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+ .load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+ .cursor_set = nv50_crtc_cursor_set,
+ .cursor_move = nv50_crtc_cursor_move,
+ .gamma_set = nv50_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv50_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_head *head;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ head->base.index = index;
+ head->base.set_dither = nv50_crtc_set_dither;
+ head->base.set_scale = nv50_crtc_set_scale;
+ head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+ head->base.color_vibrance = 50;
+ head->base.vibrant_hue = 0;
+ head->base.cursor.set_offset = nv50_cursor_set_offset;
+ head->base.cursor.set_pos = nv50_cursor_set_pos;
+ for (i = 0; i < 256; i++) {
+ head->base.lut.r[i] = i << 8;
+ head->base.lut.g[i] = i << 8;
+ head->base.lut.b[i] = i << 8;
}
- if (i == drm->vbios.dcb.entries) {
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
- goto ack;
+ crtc = &head->base.base;
+ drm_crtc_init(dev, crtc, &nv50_crtc_func);
+ drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.lut.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.lut.nvbo);
}
- script = nv50_display_script_select(dev, dcb, mc, pclk);
- nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
+ if (ret)
+ goto out;
- if (type == DCB_OUTPUT_DP) {
- int link = !(dcb->dpconf.sor.link & 1);
- if ((mc & 0x000f0000) == 0x00020000)
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
- else
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+ nv50_crtc_lut_load(crtc);
+
+ /* allocate cursor resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+ &(struct nv50_display_curs_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_curs_class),
+ &head->curs.base);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.cursor.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
}
- if (dcb->type != DCB_OUTPUT_ANALOG) {
- tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
- tmp &= ~0x00000f0f;
- if (script & 0x0100)
- tmp |= 0x00000101;
- nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
- } else {
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+ if (ret)
+ goto out;
+
+ /* allocate page flip / sync resources */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+ &(struct nv50_display_sync_class) {
+ .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+ .head = index,
+ }, sizeof(struct nv50_display_sync_class),
+ disp->sync->bo.offset, &head->sync.base);
+ if (ret)
+ goto out;
+
+ head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+ /* allocate overlay resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+ &(struct nv50_display_oimm_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_oimm_class),
+ &head->oimm.base);
+ if (ret)
+ goto out;
+
+ ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+ &(struct nv50_display_ovly_class) {
+ .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+ .head = index,
+ }, sizeof(struct nv50_display_ovly_class),
+ disp->sync->bo.offset, &head->ovly.base);
+
+out:
+ if (ret)
+ nv50_crtc_destroy(crtc);
+ return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int or = nv_encoder->or;
+ u32 dpms_ctrl;
+
+ dpms_ctrl = 0x00000000;
+ if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000001;
+ if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000004;
+
+ nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
}
- disp->irq.dcb = dcb;
- disp->irq.pclk = pclk;
- disp->irq.script = script;
+ return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ u32 *push;
+
+ nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 syncs = 0x00000000;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000002;
+
+ evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+ evo_data(push, 1 << nv_crtc->index);
+ evo_data(push, syncs);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs);
+ evo_data(push, magic);
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, 1 << nv_crtc->index);
+ }
+
+ evo_kick(push, mast);
+ }
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_encoder->crtc = encoder->crtc;
}
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this, however,
- * the VBIOS scripts on at least one board I have only switch it off on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_dac_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int ret, or = nouveau_encoder(encoder)->or;
+ u32 load = 0;
+
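+ /* the load-detect method fills 'load' with a sense-line bitmask;
+ * all three lines (presumably R, G and B, hence the value 7) must
+ * respond before we report a connected analog monitor.
+ */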
+ ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+ if (ret || load != 7)
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+ .dpms = nv50_dac_dpms,
+ .mode_fixup = nv50_dac_mode_fixup,
+ .prepare = nv50_dac_disconnect,
+ .commit = nv50_dac_commit,
+ .mode_set = nv50_dac_mode_set,
+ .disable = nv50_dac_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+ .detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+ .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u32 tmp;
- if (dcb->type != DCB_OUTPUT_TMDS)
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+ nv_connector->base.eld,
+ nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+ NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+ (max_ac_packet << 16) | rekey);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
- nv_encoder->dcb->or & (1 << or)) {
- tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
- tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
+ nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+ nv50_audio_disconnect(encoder);
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *partner;
+ int or = nv_encoder->or;
+
+ nv_encoder->last_dpms = mode;
+
+ list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+ if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+ continue;
+
+ if (nv_partner != nv_encoder &&
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
+ if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+ return;
break;
}
}
+
+ nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ return true;
}
static void
-nv50_display_unk40_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct dcb_output *dcb = disp->irq.dcb;
- u16 script = disp->irq.script;
- u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0600 + (or * 0x40), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0200 + (or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
- if (!dcb)
- goto ack;
+ nv50_hdmi_disconnect(encoder);
+ }
- nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
- nv50_display_unk40_dp_set_tmds(dev, dcb);
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->crtc = NULL;
+}
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
- nv_wr32(device, 0x610030, 0x80000000);
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+ nv50_sor_disconnect(encoder);
+ if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+ evo_sync(encoder->dev);
}
static void
-nv50_display_bh(unsigned long data)
+nv50_sor_commit(struct drm_encoder *encoder)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nvbios *bios = &drm->vbios;
+ u32 *push, lvds = 0;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto = 0xf;
+ u8 depth = 0x0;
- for (;;) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (mode->clock < 165000)
+ proto = 0x1;
+ else
+ proto = 0x5;
+ } else {
+ proto = 0x2;
+ }
- NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+ nv50_hdmi_mode_set(encoder, mode);
+ break;
+ case DCB_OUTPUT_LVDS:
+ proto = 0x0;
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
- nv50_display_unk10_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
- nv50_display_unk20_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
- nv50_display_unk40_handler(dev);
+ if (bios->fp_no_ddc) {
+ if (bios->fp.dual_link)
+ lvds |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ lvds |= 0x0200;
+ } else {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (((u8 *)nv_connector->edid)[121] == 2)
+ lvds |= 0x0100;
+ } else
+ if (mode->clock >= bios->fp.duallink_transition_clk) {
+ lvds |= 0x0100;
+ }
+
+ if (lvds & 0x0100) {
+ if (bios->fp.strapless_is_24bit & 2)
+ lvds |= 0x0200;
+ } else {
+ if (bios->fp.strapless_is_24bit & 1)
+ lvds |= 0x0200;
+ }
+
+ if (nv_connector->base.display_info.bpc == 8)
+ lvds |= 0x0200;
+ }
+
+ nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+ break;
+ case DCB_OUTPUT_DP:
+ if (nv_connector->base.display_info.bpc == 6) {
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ depth = 0x2;
+ } else
+ if (nv_connector->base.display_info.bpc == 8) {
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ depth = 0x5;
+ } else {
+ nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ depth = 0x6;
+ }
+
+ if (nv_encoder->dcb->sorconf.link & 1)
+ proto = 0x8;
else
- break;
+ proto = 0x9;
+ break;
+ default:
+ BUG_ON(1);
+ break;
}
- nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(nv50_mast(dev), 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, (depth << 16) | (proto << 8) | owner);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, owner | (proto << 8));
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
}
static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_destroy(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
- u32 addr, data;
- int chid;
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
- for (chid = 0; chid < 5; chid++) {
- if (!(channels & (1 << chid)))
- continue;
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+ .dpms = nv50_sor_dpms,
+ .mode_fixup = nv50_sor_mode_fixup,
+ .prepare = nv50_sor_prepare,
+ .commit = nv50_sor_commit,
+ .mode_set = nv50_sor_mode_set,
+ .disable = nv50_sor_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+ .destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
- addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
- data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
- NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
- "(0x%04x 0x%02x)\n", chid,
- addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
- nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+int
+nv50_display_init(struct drm_device *dev)
+{
+ u32 *push = evo_wait(nv50_mast(dev), 32);
+ if (push) {
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_kick(push, nv50_mast(dev));
+ return evo_sync(dev);
}
+
+ return -EBUSY;
}
void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+
+ nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+ nouveau_bo_unmap(disp->sync);
+ if (disp->sync)
+ nouveau_bo_unpin(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
+
+ nouveau_display(dev)->priv = NULL;
+ kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
{
+ static const u16 oclass[] = {
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ };
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- uint32_t delayed = 0;
-
- while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
- uint32_t clock;
+ struct dcb_table *dcb = &drm->vbios.dcb;
+ struct drm_connector *connector, *tmp;
+ struct nv50_disp *disp;
+ struct dcb_output *dcbe;
+ int crtcs, ret, i;
- NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
- if (!intr0 && !(intr1 & ~delayed))
- break;
+ nouveau_display(dev)->priv = disp;
+ nouveau_display(dev)->dtor = nv50_display_destroy;
+ nouveau_display(dev)->init = nv50_display_init;
+ nouveau_display(dev)->fini = nv50_display_fini;
- if (intr0 & 0x001f0000) {
- nv50_display_error_handler(dev);
- intr0 &= ~0x001f0000;
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_unpin(disp->sync);
}
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
- if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
- intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- }
+ if (ret)
+ goto out;
+
+ /* attempt to allocate a supported evo display class */
+ ret = -ENODEV;
+ for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ 0xd1500000, oclass[i], NULL, 0,
+ &disp->core);
+ }
- clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_1_CLK_UNK40));
- if (clock) {
- nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
- tasklet_schedule(&disp->tasklet);
- delayed |= clock;
- intr1 &= ~clock;
- }
+ if (ret)
+ goto out;
+
+ /* allocate master evo channel */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+ &(struct nv50_display_mast_class) {
+ .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+ }, sizeof(struct nv50_display_mast_class),
+ disp->sync->bo.offset, &disp->mast.base);
+ if (ret)
+ goto out;
+
+ /* create crtc objects to represent the hw heads */
+ if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+ crtcs = nv_rd32(device, 0x022448);
+ else
+ crtcs = 2;
+
+ for (i = 0; i < crtcs; i++) {
+ ret = nv50_crtc_create(dev, disp->core, i);
+ if (ret)
+ goto out;
+ }
+
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+ connector = nouveau_connector_create(dev, dcbe->connector);
+ if (IS_ERR(connector))
+ continue;
- if (intr0) {
- NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
+ if (dcbe->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
- if (intr1) {
- NV_ERROR(drm,
- "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
- nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
}
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->encoder_ids[0])
+ continue;
+
+ NV_WARN(drm, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+
+out:
+ if (ret)
+ nv50_display_destroy(dev);
+ return ret;
}
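
A note on the EVO channel helpers that the rewritten nv50_display.c above leans on: every hardware state change is queued by reserving push-buffer space, emitting method/data pairs, and kicking the buffer to the display engine. Below is a minimal sketch of the calling pattern, assuming the evo_wait()/evo_mthd()/evo_data()/evo_kick() helpers defined earlier in this patch; the function itself and the method number passed in are illustrative, not part of the patch.

	/* Hedged sketch: emit one method, then request a core-channel
	 * update (method 0x0080), mirroring the shape of
	 * nv50_dac_disconnect()/nv50_sor_disconnect() above. */
	static void
	example_evo_update(struct nv50_mast *mast, u32 mthd, u32 data)
	{
		u32 *push = evo_wait(mast, 4);	/* reserve 4 dwords */
		if (!push)
			return;			/* channel stalled */
		evo_mthd(push, mthd, 1);	/* one data dword follows */
		evo_data(push, data);
		evo_mthd(push, 0x0080, 1);	/* UPDATE */
		evo_data(push, 0x00000000);
		evo_kick(push, mast);		/* submit to hardware */
	}
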
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a6..70da347aa8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
#include "nouveau_display.h"
#include "nouveau_crtc.h"
#include "nouveau_reg.h"
-#include "nv50_evo.h"
-struct nv50_display_crtc {
- struct nouveau_channel *sync;
- struct {
- struct nouveau_bo *bo;
- u32 offset;
- u16 value;
- } sem;
-};
+int nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
-struct nv50_display {
- struct nouveau_channel *master;
-
- struct nouveau_gpuobj *ramin;
- u32 dmao;
- u32 hash;
-
- struct nv50_display_crtc crtc[2];
-
- struct tasklet_struct tasklet;
- struct {
- struct dcb_output *dcb;
- u16 script;
- u32 pclk;
- } irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32 nv50_display_active_crtcs(struct drm_device *);
-
-int nv50_display_sync(struct drm_device *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
-
-int nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
- u64 size);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **);
-
-int nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *, u32 swap_interval);
struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
#endif /* __NV50_DISPLAY_H__ */
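
As background for the HDMI path added above: nv50_hdmi_mode_set() sizes the audio/auxiliary data island packets from the horizontal blanking interval. A worked sketch of that arithmetic follows, assuming timings in pixels; the 56 (rekey) and 18 constants are taken from the comments in the patch itself, and the helper name is hypothetical.

	/* Hedged sketch of the max_ac_packet derivation in
	 * nv50_hdmi_mode_set(): each data island packet occupies
	 * 32 pixel clocks of the blanking period. */
	static u32
	example_max_ac_packet(u32 htotal, u32 hdisplay)
	{
		u32 hblank = htotal - hdisplay;	/* blanking in pixels */
		u32 rekey = 56;			/* HDCP rekey interval */

		/* 1920x1080 CEA: htotal 2200 -> (280 - 56 - 18) / 32 = 6 */
		return (hblank - rekey - 18) / 32;
	}
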
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa77..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
- struct nouveau_channel *evo = *pevo;
-
- if (!evo)
- return;
- *pevo = NULL;
-
- nouveau_bo_unmap(evo->push.buffer);
- nouveau_bo_ref(NULL, &evo->push.buffer);
-
- if (evo->object)
- iounmap(evo->object->oclass->ofuncs);
-
- kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
- struct drm_device *dev = evo->fence;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 dmao = disp->dmao;
- u32 hash = disp->hash;
- u32 flags5;
-
- if (nv_device(drm->device)->chipset < 0xc0) {
- /* not supported on 0x50, specified in format mthd */
- if (nv_device(drm->device)->chipset == 0x50)
- memtype = 0;
- flags5 = 0x00010000;
- } else {
- if (memtype & 0x80000000)
- flags5 = 0x00000000; /* large pages */
- else
- flags5 = 0x00020000;
- }
-
- nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
- nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
- nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
- upper_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
- nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
- nv_wo32(disp->ramin, hash + 0x00, handle);
- nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
- evo->handle);
-
- disp->dmao += 0x20;
- disp->hash += 0x08;
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
- struct nouveau_channel **pevo)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret;
-
- evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!evo)
- return -ENOMEM;
- *pevo = evo;
-
- evo->drm = drm;
- evo->handle = chid;
- evo->fence = dev;
- evo->user_get = 4;
- evo->user_put = 0;
-
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
- &evo->push.buffer);
- if (ret == 0)
- ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- ret = nouveau_bo_map(evo->push.buffer);
- if (ret) {
- NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
- evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
- evo->object->parent = nv_object(disp->ramin)->parent;
- evo->object->engine = nv_object(disp->ramin)->engine;
- evo->object->oclass =
- kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
- evo->object->oclass->ofuncs =
- kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
- evo->object->oclass->ofuncs->rd08 =
- ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
- return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle, ret, i;
- u64 pushbuf = evo->push.buffer->bo.offset;
- u32 tmp;
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x009f0000) == 0x00020000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x003f0000) == 0x00030000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
- /* initialise fifo */
- nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
- NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_EVO_DMA_CB_VALID);
- nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
- nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
- nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- return -EBUSY;
- }
-
- /* enable error reporting on the channel */
- nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.max &= ~7;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
- if (ret)
- return ret;
-
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
-
- return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle;
-
- nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- }
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sem.bo) {
- nouveau_bo_unmap(disp->crtc[i].sem.bo);
- nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
- }
- nv50_evo_channel_del(&disp->crtc[i].sync);
- }
- nv50_evo_channel_del(&disp->master);
- nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret, i, j;
-
- /* setup object management on it, any other evo channel will
- * use this also as there's no per-channel support on the
- * hardware
- */
- ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
- if (ret) {
- NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
- goto err;
- }
-
- disp->hash = 0x0000;
- disp->dmao = 0x1000;
-
- /* create primary evo channel, the one we use for modesetting
- * purposes
- */
- ret = nv50_evo_channel_new(dev, 0, &disp->master);
- if (ret)
- return ret;
- evo = disp->master;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
- disp->ramin->addr + 0x2000, 0x1000, NULL);
- if (ret)
- goto err;
-
- /* create some default objects for the scanout memtypes we support */
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- /* create "display sync" channels and other structures we need
- * to implement page flipping
- */
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- u64 offset;
-
- ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
- if (ret)
- goto err;
-
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &dispc->sem.bo);
- if (!ret) {
- ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(dispc->sem.bo);
- if (ret)
- nouveau_bo_ref(NULL, &dispc->sem.bo);
- offset = dispc->sem.bo->bo.offset;
- }
-
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
- offset, 4096, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- for (j = 0; j < 4096; j += 4)
- nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
- dispc->sem.offset = 0;
- }
-
- return 0;
-
-err:
- nv50_evo_destroy(dev);
- return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int ret, i;
-
- ret = nv50_evo_channel_init(disp->master);
- if (ret)
- return ret;
-
- for (i = 0; i < 2; i++) {
- ret = nv50_evo_channel_init(disp->crtc[i].sync);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sync)
- nv50_evo_channel_fini(disp->crtc[i].sync);
- }
-
- if (disp->master)
- nv50_evo_channel_fini(disp->master);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc834..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE 0x00000080
-#define NV50_EVO_UNK84 0x00000084
-#define NV50_EVO_UNK84_NOTIFY 0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
-#define NV50_EVO_DMA_NOTIFY 0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
-#define NV50_EVO_UNK8C 0x0000008C
-
-#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL 0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
-
-#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL 0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
-
-#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800 0x00000800
-#define NV50_EVO_CRTC_CLOCK 0x00000804
-#define NV50_EVO_CRTC_INTERLACE 0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
-#define NV50_EVO_CRTC_UNK0820 0x00000820
-#define NV50_EVO_CRTC_UNK0824 0x00000824
-#define NV50_EVO_CRTC_UNK082C 0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
-#define NV50_EVO_CRTC_FB_SIZE 0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
-#define NV50_EVO_CRTC_FB_DMA 0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
-#define NV50_EVO_CRTC_FB_POS 0x000008c0
-#define NV50_EVO_CRTC_REAL_RES 0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
- ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
-#define NV50_EVO_CRTC_UNK900 0x00000900
-#define NV50_EVO_CRTC_UNK904 0x00000904
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee2..c20f2727ea0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
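
The nv50_fence.c hunk above (and the matching nvc0_fence.c hunk later in this patch) close a leak: when nouveau_bo_map() fails after a successful pin, the buffer must be unpinned before the last reference is dropped. A sketch of the resulting idiom, under the assumption that the caller owns the only reference; the helper name is hypothetical.

	/* Hedged sketch of the new/pin/map error unwind used for
	 * disp->sync and the fence buffers in this patch. */
	static int
	example_bo_setup(struct drm_device *dev, struct nouveau_bo **pbo)
	{
		int ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
					 0, 0x0000, NULL, pbo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(*pbo, TTM_PL_FLAG_VRAM);
		if (ret == 0) {
			ret = nouveau_bo_map(*pbo);
			if (ret)
				nouveau_bo_unpin(*pbo);	/* undo the pin */
		}
		if (ret)
			nouveau_bo_ref(NULL, pbo);	/* drop the reference */
		return ret;
	}
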
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1ca..8bd5d2781baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nouveau_dev(dev);
- u32 crtc_mask = nv50_display_active_crtcs(dev);
+ u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
struct nouveau_mem_exec_func exec = {
.dev = dev,
.precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e1326..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv50[] = { 16, 8, 0, 24 };
- if (nv_device(drm->device)->chipset == 0xaf)
- return nvaf[lane];
- return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- config = entry + table[4];
- while (config[0] != swing || config[1] != preem) {
- config += table[5];
- if (config >= entry + table[4] + entry[4] * table[5])
- return;
- }
-
- nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
- nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
- u8 *table, *entry, mask;
- int i;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- entry = ROMPTR(dev, entry[10]);
- if (entry) {
- while (link_bw < ROM16(entry[0]) * 10)
- entry += 4;
-
- nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
- }
-
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- if (link_bw > 162000)
- clksor |= 0x00040000;
-
- nv_wr32(device, 0x614300 + (or * 0x800), clksor);
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
- mask = 0;
- for (i = 0; i < link_nr; i++)
- mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
- if (clksor & 0x000c0000)
- *bw = 270000;
- else
- *bw = 162000;
-
- if (dpctrl > 0x00030000) *nr = 4;
- else if (dpctrl > 0x00010000) *nr = 2;
- else *nr = 1;
-}
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 symbol = 100000;
- int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
- int TU, VTUi, VTUf, VTUa;
- u64 link_data_rate, link_ratio, unk;
- u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, r;
-
- /* calculate packed data rate for each lane */
- nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
- link_data_rate = (clk * bpp / 8) / link_nr;
-
- /* calculate ratio of packed data rate to link symbol rate */
- link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
-
- for (TU = 64; TU >= 32; TU--) {
- /* calculate average number of valid symbols in each TU */
- u32 tu_valid = link_ratio * TU;
- u32 calc, diff;
-
- /* find a hw representation for the fraction.. */
- VTUi = tu_valid / symbol;
- calc = VTUi * symbol;
- diff = tu_valid - calc;
- if (diff) {
- if (diff >= (symbol / 2)) {
- VTUf = symbol / (symbol - diff);
- if (symbol - (VTUf * diff))
- VTUf++;
-
- if (VTUf <= 15) {
- VTUa = 1;
- calc += symbol - (symbol / VTUf);
- } else {
- VTUa = 0;
- VTUf = 1;
- calc += symbol;
- }
- } else {
- VTUa = 0;
- VTUf = min((int)(symbol / diff), 15);
- calc += symbol / VTUf;
- }
-
- diff = calc - tu_valid;
- } else {
- /* no remainder, but the hw doesn't like the fractional
- * part to be zero. decrement the integer part and
- * have the fraction add a whole symbol back
- */
- VTUa = 0;
- VTUf = 1;
- VTUi--;
- }
-
- if (diff < best_diff) {
- best_diff = diff;
- bestTU = TU;
- bestVTUa = VTUa;
- bestVTUf = VTUf;
- bestVTUi = VTUi;
- if (diff == 0)
- break;
- }
- }
-
- if (!bestTU) {
- NV_ERROR(drm, "DP: unable to find suitable config\n");
- return;
- }
-
- /* XXX close to vbios numbers, but not right */
- unk = (symbol - link_ratio) * bestTU;
- unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
- unk += 6;
-
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
- nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
- bestVTUf << 16 |
- bestVTUi << 8 |
- unk);
-}
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting SOR\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nouveau_hdmi_mode_set(encoder, NULL);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder *enc;
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
- nv_encoder->last_dpms = mode;
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
- if (nvenc == nv_encoder ||
- (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
- nvenc->dcb->type != DCB_OUTPUT_LVDS &&
- nvenc->dcb->type != DCB_OUTPUT_DP) ||
- nvenc->dcb->or != nv_encoder->dcb->or)
- continue;
-
- if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
- return;
- }
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
- if (mode == DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
- else
- val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
- nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
- }
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nv50_sor_dp_link_set,
- .train_set = nv50_sor_dp_train_set,
- .train_adj = nv50_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nv50_sor_disconnect(encoder);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- /* avoid race between link training and supervisor intr */
- nv50_display_sync(encoder->dev);
- }
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- uint32_t mode_ctl = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
- nv_encoder->crtc = encoder->crtc;
-
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctl = 0x0100;
- else
- mode_ctl = 0x0500;
- } else
- mode_ctl = 0x0200;
-
- nouveau_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_DP:
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- mode_ctl |= 0x00020000;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- mode_ctl |= 0x00050000;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctl |= 0x00000800;
- else
- mode_ctl |= 0x00000900;
- break;
- default:
- break;
- }
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(drm, "no space while connecting SOR\n");
- nv_encoder->crtc = NULL;
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
- .dpms = nv50_sor_dpms,
- .save = nv50_sor_save,
- .restore = nv50_sor_restore,
- .mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .get_crtc = nv50_sor_crtc_get,
- .detect = NULL,
- .disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
-
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
- .destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder = NULL;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder *encoder;
- int type;
-
- NV_DEBUG(drm, "\n");
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_DP:
- type = DRM_MODE_ENCODER_TMDS;
- break;
- case DCB_OUTPUT_LVDS:
- type = DRM_MODE_ENCODER_LVDS;
- break;
- default:
- return -EINVAL;
- }
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
- drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
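
Although nv50_sor.c is deleted wholesale here, its DisplayPort bandwidth bookkeeping survives in the new nv50_sor_mode_set(): dp.datarate is the pixel clock times bits-per-pixel over 8, with bpp picked from the sink's bpc. A worked sketch of that mapping; the 0x2/0x5/0x6 depth codes are the EVO values from the patch, while the helper itself is hypothetical.

	/* Hedged sketch of the DP datarate/depth selection in
	 * nv50_sor_mode_set(): 6bpc -> 18bpp, 8bpc -> 24bpp,
	 * anything else -> 30bpp. */
	static u32
	example_dp_datarate(u32 clock_khz, u32 bpc, u8 *depth)
	{
		u32 bpp = (bpc == 6) ? 18 : (bpc == 8) ? 24 : 30;

		*depth = (bpc == 6) ? 0x2 : (bpc == 8) ? 0x5 : 0x6;
		/* e.g. 8bpc at a 162000kHz clock: 162000 * 24 / 8 = 486000 */
		return clock_khz * bpp / 8;
	}
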
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac9676..2a56b1b551cb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
struct nvc0_fence_chan *fctx = chan->fence;
int i;
- if (nv_device(chan->drm->device)->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
- } else
- if (nv_device(chan->drm->device)->card_type >= NV_50) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (nv_device(chan->drm->device)->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(chan->drm->dev, i);
- else
- bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
{
struct nvc0_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0)
+ if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
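
One more design point from the consolidated code above: several encoders can share a single OR (output resource), so nv50_sor_dpms() refuses to power an OR down while a partner encoder on the same OR is still lit. A compressed sketch of that guard, assuming the encoder-list walk and dcb->or semantics shown earlier; the patch's version additionally filters on encoder_type.

	/* Hedged sketch of the shared-OR guard in nv50_sor_dpms():
	 * report the OR busy if another encoder on it is still on. */
	static bool
	example_or_busy(struct drm_device *dev, struct nouveau_encoder *self)
	{
		struct drm_encoder *partner;

		list_for_each_entry(partner, &dev->mode_config.encoder_list,
				    head) {
			struct nouveau_encoder *nv = nouveau_encoder(partner);
			if (nv != self && nv->dcb->or == self->dcb->or &&
			    nv->last_dpms == DRM_MODE_DPMS_ON)
				return true;	/* partner still active */
		}
		return false;
	}
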
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
-
-struct evo {
- int idx;
- dma_addr_t handle;
- u32 *ptr;
- struct {
- u32 offset;
- u16 value;
- } sem;
-};
-
-struct nvd0_display {
- struct nouveau_gpuobj *mem;
- struct nouveau_bo *sync;
- struct evo evo[9];
-
- struct tasklet_struct tasklet;
- u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- int ret = 0;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
- nv_wr32(device, 0x610704 + (id * 0x10), data);
- nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
- if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
- ret = -EBUSY;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
- return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
- if (put + nr >= (PAGE_SIZE / 4)) {
- disp->evo[id].ptr[put] = 0x20000000;
-
- nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
- if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
- NV_ERROR(drm, "evo %d dma stalled\n", id);
- return NULL;
- }
-
- put = 0;
- }
-
- return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
-
- nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 flags;
-
- flags = 0x00000000;
- if (ch == EVO_MASTER)
- flags |= 0x01000000;
-
- nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
- nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
- nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 *push = evo_wait(dev, ch, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, ch);
- if (nv_wait_cb(device, evo_sync_wait, disp->sync))
- return 0;
- }
-
- return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
- return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u32 *push;
-
- push = evo_wait(crtc->dev, evo->idx, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
- }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u64 offset;
- u32 *push;
- int ret;
-
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
-
- push = evo_wait(crtc->dev, evo->idx, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
-
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
-
-
- offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += evo->sem.offset;
-
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | evo->sem.value);
- OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
- FIRE_RING (chan);
- } else {
- nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
- 0xf00d0000 | evo->sem.value);
- evo_sync(crtc->dev, EVO_MASTER);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, evo->sem.offset);
- evo_data(push, 0xf00d0000 | evo->sem.value);
- evo_data(push, 0x74b1e000);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
-
- evo->sem.offset ^= 0x10;
- evo->sem.value++;
- return 0;
-}
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
- u32 mthd;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- if (nv_device(drm->device)->card_type < NV_E0)
- mthd = 0x0490 + (nv_crtc->index * 0x0300);
- else
- mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, mthd, 1);
- evo_data(push, mode);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, dev, EVO_MASTER);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode)
- mode = nv_connector->scaling_mode;
-
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
- evo_kick(push, dev, EVO_MASTER);
- if (update) {
- nvd0_display_flip_stop(crtc);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- }
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- u32 *push;
-
- push = evo_wait(fb->dev, EVO_MASTER, 16);
- if (push) {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, fb->dev, EVO_MASTER);
- }
-
- nv_crtc->fb.tile_flags = nvfb->r_dma;
- return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, EVO_MASTER, 16);
- if (push) {
- if (show) {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- }
-
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
-
- evo_kick(push, dev, EVO_MASTER);
- }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- nvd0_display_flip_stop(crtc);
-
- push = evo_wait(crtc->dev, EVO_MASTER, 2);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 32);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
- evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
- int ret;
-
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 64);
- if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, false);
- nvd0_crtc_set_scale(nv_crtc, false);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
-
- if (!crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
- return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- for (i = 0; i < 256; i++) {
- writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
- writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
- writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
- }
-}
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
-
- if (visible) {
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
- }
-
- if (visible != nv_crtc->cursor.visible) {
- nvd0_crtc_cursor_show(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
- }
-
- return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ch = EVO_CURS(nv_crtc->index);
-
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
- evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
- return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
- u32 i;
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- drm_crtc_cleanup(crtc);
- kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
- .dpms = nvd0_crtc_dpms,
- .prepare = nvd0_crtc_prepare,
- .commit = nvd0_crtc_commit,
- .mode_fixup = nvd0_crtc_mode_fixup,
- .mode_set = nvd0_crtc_mode_set,
- .mode_set_base = nvd0_crtc_mode_set_base,
- .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
- .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
- .cursor_set = nvd0_crtc_cursor_set,
- .cursor_move = nvd0_crtc_cursor_move,
- .gamma_set = nvd0_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = nvd0_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_crtc *nv_crtc;
- struct drm_crtc *crtc;
- int ret, i;
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nvd0_crtc_set_dither;
- nv_crtc->set_scale = nvd0_crtc_set_scale;
- nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
- nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- crtc = &nv_crtc->base;
- drm_crtc_init(dev, crtc, &nvd0_crtc_func);
- drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
- nvd0_crtc_lut_load(crtc);
-
-out:
- if (ret)
- nvd0_crtc_destroy(crtc);
- return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x80000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
-
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- u32 syncs, magic, *push;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- push = evo_wait(encoder->dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
- evo_data(push, 1 << nv_crtc->index);
- evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = NULL;
- }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- enum drm_connector_status status = connector_status_disconnected;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 load;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
- udelay(9500);
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
- load = nv_rd32(device, 0x61a00c + (or * 0x800));
- if ((load & 0x38000000) == 0x38000000)
- status = connector_status_connected;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
- return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
- .dpms = nvd0_dac_dpms,
- .mode_fixup = nvd0_dac_mode_fixup,
- .prepare = nvd0_dac_disconnect,
- .commit = nvd0_dac_commit,
- .mode_set = nvd0_dac_mode_set,
- .disable = nvd0_dac_disconnect,
- .get_crtc = nvd0_display_crtc_get,
- .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
- .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int i, or = nv_encoder->or * 0x30;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid))
- return;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
-
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
- nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
- }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or * 0x30;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
- u32 rekey = 56; /* binary driver, and tegra constant */
- u32 max_ac_packet;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_hdmi_monitor(nv_connector->edid))
- return;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* AVI InfoFrame */
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x61671c + head, 0x000d0282);
- nv_wr32(device, 0x616720 + head, 0x0000006f);
- nv_wr32(device, 0x616724 + head, 0x00000000);
- nv_wr32(device, 0x616728 + head, 0x00000000);
- nv_wr32(device, 0x61672c + head, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
- /* ??? InfoFrame? */
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x6167ac + head, 0x00000010);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
- /* HDMI_CTRL */
- nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
- max_ac_packet << 16);
-
- /* NFI, audio doesn't work without it though.. */
- nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
- nvd0_audio_mode_set(encoder, mode);
-}
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
-
- nvd0_audio_disconnect(encoder);
-
- nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- static const u8 nvd0[] = { 16, 8, 0, 24 };
- return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config = NULL;
-
- switch (swing) {
- case 0: preem += 0; break;
- case 1: preem += 4; break;
- case 2: preem += 7; break;
- case 3: preem += 9; break;
- }
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) {
- config = entry + table[4];
- config += table[5] * preem;
- } else
- if (table[0] == 0x40) {
- config = table + table[1];
- config += table[2] * table[3];
- config += table[6] * preem;
- }
- }
-
- if (!config) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
- nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
- nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
- nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
- u32 script = 0x0000, lane_mask = 0;
- u8 *table, *entry;
- int i;
-
- link_bw /= 27000;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
- else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
- else entry = NULL;
-
- while (entry) {
- if (entry[0] >= link_bw)
- break;
- entry += 3;
- }
-
- nouveau_bios_run_init_table(dev, script, dcb, crtc);
- }
-
- clksor |= link_bw << 18;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- for (i = 0; i < link_nr; i++)
- lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
- nv_wr32(device, 0x612300 + soff, clksor);
- nv_wr32(device, 0x61c10c + loff, dpctrl);
- nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
- u32 *link_nr, u32 *link_bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x612300 + soff);
-
- if (dpctrl > 0x00030000) *link_nr = 4;
- else if (dpctrl > 0x00010000) *link_nr = 2;
- else *link_nr = 1;
-
- *link_bw = (clksor & 0x007c0000) >> 18;
- *link_bw *= 27000;
-}
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
- u32 crtc, u32 datarate)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 symbol = 100000;
- const u32 TU = 64;
- u32 link_nr, link_bw;
- u64 ratio, value;
-
- nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
- ratio = datarate;
- ratio *= symbol;
- do_div(ratio, link_nr * link_bw);
-
- value = (symbol - ratio) * TU;
- value *= ratio;
- do_div(value, symbol);
- do_div(value, symbol);
-
- value += 5;
- value |= 0x08000000;
-
- nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- struct drm_encoder *partner;
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
-
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
-
- dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
- dpms_ctrl |= 0x80000000;
-
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nvd0_sor_dp_link_set,
- .train_set = nvd0_sor_dp_train_set,
- .train_adj = nvd0_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nvd0_hdmi_disconnect(encoder);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
- nvd0_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct nvbios *bios = &drm->vbios;
- u32 mode_ctrl = (1 << nv_crtc->index);
- u32 syncs, magic, *push;
- u32 or_config;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctrl |= 0x00000100;
- else
- mode_ctrl |= 0x00000500;
- } else {
- mode_ctrl |= 0x00000200;
- }
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (mode->clock >= 165000)
- or_config |= 0x0100;
-
- nvd0_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_LVDS:
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- or_config |= 0x0100;
- if (bios->fp.if_is_24bit)
- or_config |= 0x0200;
- } else {
- if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- if (((u8 *)nv_connector->edid)[121] == 2)
- or_config |= 0x0100;
- } else
- if (mode->clock >= bios->fp.duallink_transition_clk) {
- or_config |= 0x0100;
- }
-
- if (or_config & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- or_config |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- or_config |= 0x0200;
- }
-
- if (nv_connector->base.display_info.bpc == 8)
- or_config |= 0x0200;
-
- }
- break;
- case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- syncs |= 0x00000002 << 6;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- syncs |= 0x00000005 << 6;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctrl |= 0x00000800;
- else
- mode_ctrl |= 0x00000900;
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
- nv_encoder->dp.datarate);
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
- evo_data(push, mode_ctrl);
- evo_data(push, or_config);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
- .dpms = nvd0_sor_dpms,
- .mode_fixup = nvd0_sor_mode_fixup,
- .prepare = nvd0_sor_prepare,
- .commit = nvd0_sor_commit,
- .mode_set = nvd0_sor_mode_set,
- .disable = nvd0_sor_disconnect,
- .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
- .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int type, or, i, link = -1;
-
- if (id < 4) {
- type = DCB_OUTPUT_ANALOG;
- or = id;
- } else {
- switch (mc & 0x00000f00) {
- case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
- case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
- case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
- case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
- return NULL;
- }
-
- or = id - 4;
- }
-
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)) &&
- (link < 0 || link == !(dcb->sorconf.link & 1)))
- return dcb;
- }
-
- NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
- return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb;
- u32 or, tmp, pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
- }
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
- NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
- crtc, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- nv50_crtc_set_clock(dev, crtc, pclk);
- }
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
- or = ffs(dcb->or) - 1;
-
- nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
- nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
- switch (dcb->type) {
- case DCB_OUTPUT_ANALOG:
- nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
- break;
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (cfg & 0x00000100)
- tmp = 0x00000101;
- else
- tmp = 0x00000000;
-
- nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
- break;
- default:
- break;
- }
-
- break;
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int pclk, i;
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 mask = 0, crtc = ~0;
- int i;
-
- if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
- NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
- NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(device, 0x6101d0),
- nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
- for (i = 0; i < 8; i++) {
- NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
- i < 4 ? "DAC" : "SOR", i,
- nv_rd32(device, 0x640180 + (i * 0x20)),
- nv_rd32(device, 0x660180 + (i * 0x20)));
- }
- }
-
- while (!mask && ++crtc < dev->mode_config.num_crtc)
- mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
- if (disp->modeset & 0x00000001)
- nvd0_display_unk1_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000002)
- nvd0_display_unk2_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000004)
- nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 intr = nv_rd32(device, 0x610088);
-
- if (intr & 0x00000001) {
- u32 stat = nv_rd32(device, 0x61008c);
- nv_wr32(device, 0x61008c, stat);
- intr &= ~0x00000001;
- }
-
- if (intr & 0x00000002) {
- u32 stat = nv_rd32(device, 0x61009c);
- int chid = ffs(stat) - 1;
- if (chid >= 0) {
- u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
- NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
- nv_wr32(device, 0x61009c, (1 << chid));
- nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
- }
-
- intr &= ~0x00000002;
- }
-
- if (intr & 0x00100000) {
- u32 stat = nv_rd32(device, 0x6100ac);
-
- if (stat & 0x00000007) {
- disp->modeset = stat;
- tasklet_schedule(&disp->tasklet);
-
- nv_wr32(device, 0x6100ac, (stat & 0x00000007));
- stat &= ~0x00000007;
- }
-
- if (stat) {
- NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
- nv_wr32(device, 0x6100ac, stat);
- }
-
- intr &= ~0x00100000;
- }
-
- intr &= ~0x0f000000; /* vblank, handled in core */
- if (intr)
- NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
- int i;
-
- /* fini cursors + overlays + flips */
- for (i = 1; i >= 0; i--) {
- evo_fini_pio(dev, EVO_CURS(i));
- evo_fini_pio(dev, EVO_OIMM(i));
- evo_fini_dma(dev, EVO_OVLY(i));
- evo_fini_dma(dev, EVO_FLIP(i));
- }
-
- /* fini master */
- evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret, i;
- u32 *push;
-
- if (nv_rd32(device, 0x6100ac) & 0x00000100) {
- nv_wr32(device, 0x6100ac, 0x00000100);
- nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
- NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
-	/* nfi what these are exactly, but SOR_MODE_CTRL won't
-	 * work at all unless you do the SOR part below.
-	 */
- for (i = 0; i < 3; i++) {
- u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
- nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
- }
-
- for (i = 0; i < 4; i++) {
- u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
- nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
- }
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
- u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
- u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
- nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
- nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
- nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
- }
-
- /* point at our hash table / objects, enable interrupts */
- nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
- nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
- /* init master */
- ret = evo_init_dma(dev, EVO_MASTER);
- if (ret)
- goto error;
-
- /* init flips + overlays + cursors */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
- (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
- (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
- (ret = evo_init_pio(dev, EVO_CURS(i))))
- goto error;
- }
-
- push = evo_wait(dev, EVO_MASTER, 32);
- if (!push) {
- ret = -EBUSY;
- goto error;
- }
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000);
- evo_mthd(push, 0x008c, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
-
-error:
- if (ret)
- nvd0_display_fini(dev);
- return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct pci_dev *pdev = dev->pdev;
- int i;
-
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
- }
-
- nouveau_gpuobj_ref(NULL, &disp->mem);
- nouveau_bo_unmap(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
-
- nouveau_display(dev)->priv = NULL;
- kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bar *bar = nouveau_bar(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *tmp;
- struct pci_dev *pdev = dev->pdev;
- struct nvd0_display *disp;
- struct dcb_output *dcbe;
- int crtcs, ret, i;
-
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = disp;
- nouveau_display(dev)->dtor = nvd0_display_destroy;
- nouveau_display(dev)->init = nvd0_display_init;
- nouveau_display(dev)->fini = nvd0_display_fini;
-
- /* create crtc objects to represent the hw heads */
- crtcs = nv_rd32(device, 0x022448);
- for (i = 0; i < crtcs; i++) {
- ret = nvd0_crtc_create(dev, i);
- if (ret)
- goto out;
- }
-
- /* create encoder/connector objects based on VBIOS DCB table */
- for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
- if (IS_ERR(connector))
- continue;
-
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
-
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nvd0_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nvd0_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
- }
-
- /* cull any connectors we created that don't have an encoder */
- list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
- continue;
-
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
-
- /* setup interrupt handling */
- tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
- /* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
- }
-
- if (ret)
- goto out;
-
- /* hash table and dma objects for the memory areas we care about */
- ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
- if (ret)
- goto out;
-
- /* create evo dma channels */
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- u64 offset = disp->sync->bo.offset;
- u32 dmao = 0x1000 + (i * 0x100);
- u32 hash = 0x0000 + (i * 0x040);
-
- evo->idx = i;
- evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
- evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
- if (!evo->ptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
- nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
- nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
- nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
- nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
- ((dmao + 0x00) << 9));
-
- nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
- nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
- nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
- ((dmao + 0x20) << 9));
-
- nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
- nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
- nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
- nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
- ((dmao + 0x40) << 9));
-
- nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
- nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
- nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
- nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
- ((dmao + 0x60) << 9));
- }
-
- bar->flush(bar);
-
-out:
- if (ret)
- nvd0_display_destroy(dev);
- return ret;
-}
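
As an aside on the removed hunk above: each EVO channel gets a 0x100-byte block of DMA objects at 0x1000 + i*0x100 and a 0x40-byte hash slot, and every hash entry packs a valid bit, the channel index, and the DMA-object offset into one word. A minimal user-space sketch of that packing, assuming the field positions implied by the nv_wo32() writes (bit 0 valid, channel at bit 27, offset at bit 9) — an illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the hash writes above: bit 0 = valid,
 * bits 30:27 = EVO channel index, DMA-object offset stored at bit 9.
 */
static uint32_t evo_hash_entry(unsigned int chan, uint32_t dmao)
{
	return 0x00000001 | (chan << 27) | (dmao << 9);
}

int main(void)
{
	unsigned int i = 1;                 /* second EVO channel */
	uint32_t dmao = 0x1000 + i * 0x100; /* its DMA-object block */

	/* entry for the NvEvoVRAM object at dmao + 0x20 */
	printf("hash entry: 0x%08x\n", evo_hash_entry(i, dmao + 0x20));
	return 0;
}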
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2e566e123e9e..9175615bbd8a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
@@ -1697,34 +1699,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- if (ASIC_IS_AVIVO(rdev)) {
- /* in DP mode, the DP ref clock can come from either PPLL
- * depending on the asic:
- * DCE3: PPLL1 or PPLL2
- */
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
- /* use the same PPLL for all DP monitors */
- pll = radeon_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = radeon_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- /* all other cases */
- pll_in_use = radeon_get_pll_use_mask(crtc);
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
- return radeon_crtc->crtc_id;
- }
+ /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+ /* some atombios code (observed on some DCE2/DCE3 boards) has a bug:
+ * the matching between pll and crtc is done through
+ * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses the
+ * pll id (1 or 2) to select which register to write, i.e. if using
+ * pll1 it writes PCLK_CRTC1_CNTL (0x480) and if using pll2
+ * it writes PCLK_CRTC2_CNTL (0x484), and then uses the crtc id to
+ * choose which value to write. This is the reverse of the register
+ * logic, so the only cases that work are when the pll id matches
+ * the crtc id, or when both plls and crtcs are enabled and use the
+ * same clock.
+ *
+ * So just return the crtc id as if crtc and pll were hard linked
+ * together, even if they aren't.
+ */
+ return radeon_crtc->crtc_id;
}
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index ba498f8e47a2..4552d4aff317 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- radeon_dp_set_link_config(connector, mode);
+ radeon_dp_set_link_config(connector, adjusted_mode);
}
return true;
@@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
/* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
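
The one-character fix above is worth spelling out: with `||`, the condition was a tautology (no chip can be both RV710 and RV730 at once), so the extra transmitter setup ran on every board instead of being skipped on the buggy ones. A small sketch demonstrating the difference, with made-up enum values for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical family ids, for illustration only. */
enum { CHIP_RV710 = 1, CHIP_RV730 = 2, CHIP_RV770 = 3 };

int main(void)
{
	for (int family = CHIP_RV710; family <= CHIP_RV770; family++) {
		/* old condition: true for every family, nothing excluded */
		bool buggy = (family != CHIP_RV710) || (family != CHIP_RV730);
		/* fixed condition: false exactly on the two problem chips */
		bool fixed = (family != CHIP_RV710) && (family != CHIP_RV730);
		printf("family=%d buggy=%d fixed=%d\n", family, buggy, fixed);
	}
	return 0;
}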
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 14313ad43b76..8dbc69a6e5bd 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+ } else {
+ save->crtc_enabled[i] = false;
}
}
@@ -1372,7 +1374,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
for (i = 0; i < rdev->num_crtc; i++) {
- if (save->crtc_enabled) {
+ if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
@@ -1819,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
- rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 4;
if (rdev->pdev->device == 0x9648)
rdev->config.evergreen.max_simds = 3;
else if ((rdev->pdev->device == 0x9647) ||
@@ -1842,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1864,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1912,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
- rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2032,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2401,8 +2404,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cayman_cp_int_cntl_setup(rdev, 1, 0);
cayman_cp_int_cntl_setup(rdev, 2, 0);
+ tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ WREG32(CAYMAN_DMA1_CNTL, tmp);
} else
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2455,6 +2462,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+ u32 dma_cntl, dma_cntl1 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2482,6 +2490,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2504,6 +2514,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2589,6 +2612,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
} else
WREG32(CP_INT_CNTL, cp_int_cntl);
+
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_CAYMAN)
+ WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3091,6 +3120,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3114,9 +3153,19 @@ restart_ih:
} else
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA trap event */
+ if (rdev->family >= CHIP_CAYMAN) {
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3142,6 +3191,143 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * sequence number, and a DMA trap packet to generate an
+ * interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+ radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3205,6 +3391,12 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3219,12 +3411,23 @@ static int evergreen_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
r = evergreen_cp_resume(rdev);
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -3271,11 +3474,9 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- ring->ready = false;
+ r600_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3352,6 +3553,9 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3364,6 +3568,7 @@ int evergreen_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3391,6 +3596,7 @@ void evergreen_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
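
The dword budget in evergreen_copy_dma above follows directly from the packet format: each COPY packet is 5 dwords and moves at most 0xfffff dwords, and the driver reserves 11 extra dwords for the semaphore sync and fence. A sketch of that arithmetic, assuming RADEON_GPU_PAGE_SHIFT is 12 as elsewhere in radeon:

#include <stdint.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SHIFT 12      /* 4 KiB GPU pages, as in the driver */
#define DMA_MAX_COPY_DW       0xfffff /* max count field of a COPY packet */

int main(void)
{
	unsigned int num_gpu_pages = 2048; /* an 8 MiB move */
	uint32_t size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	uint32_t num_loops = (size_in_dw + DMA_MAX_COPY_DW - 1) / DMA_MAX_COPY_DW;

	/* 5 dwords per COPY packet, plus 11 for sync/fence/trap overhead */
	printf("loops=%u, ring dwords to reserve=%u\n",
	       num_loops, num_loops * 5 + 11);
	return 0;
}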
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 95e6318b6268..74c6b42d2597 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
/* height is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
+ /* always assume 8x8 htile */
+ /* align is htile align * 8; htile align varies with the
+ * number of pipes, the tile width, and nby
+ */
switch (track->npipes) {
case 8:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 64 * 8);
break;
case 4:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 32 * 8);
break;
case 2:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 32 * 8);
break;
case 1:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 16 * 8);
break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
}
}
/* compute number of htile */
- nbx = nbx / 8;
- nby = nby / 8;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
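
A worked example of the htile sizing above may help, taking the 4-pipe alignment row (64*8 x 32*8 pixels) and a 1920x1080 surface; the real code additionally handles the linear-aligned path and adds htile_offset:

#include <stdio.h>

static unsigned int round_up_u(unsigned int x, unsigned int align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	unsigned int npipes = 4, nbx = 1920, nby = 1080;

	/* 4-pipe alignment from the switch above: 64*8 x 32*8 pixels */
	nbx = round_up_u(nbx, 64 * 8);
	nby = round_up_u(nby, 32 * 8);

	/* one 4-byte htile per 8x8 pixel block, padded to npipes * 2K */
	unsigned int size = round_up_u((nbx >> 3) * (nby >> 3) * 4,
				       npipes * 2048);
	printf("htile size: %u bytes\n", size);
	return 0;
}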
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_SURFACE:
/* 8x8 only */
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size, info;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ info = radeon_get_ib_value(p, idx+1);
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem-to-mem copies require a dw-aligned count */
+ if (size % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ /* GDS is ok */
+ if (((info & 0x60000000) >> 29) != 1) {
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ if (((info & 0x60000000) >> 29) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ } else if (((info & 0x60000000) >> 29) != 2) {
+ DRM_ERROR("bad CP DMA SRC_SEL\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ /* GDS is ok */
+ if (((info & 0x00300000) >> 20) != 1) {
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ if (((info & 0x00300000) >> 20) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ } else {
+ DRM_ERROR("bad CP DMA DST_SEL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2715,6 +2829,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/*
+ * DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl, updates the GPU
+ * addresses based on the reloc information, and checks
+ * for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset, dst2_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 7;
+ } else {
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+7];
+ dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+7];
+ src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+7];
+ dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+7];
+ src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst2_offset = ib[idx+2];
+ dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
+ src_offset = ib[idx+3];
+ src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
+
/* vm parser */
static bool evergreen_vm_reg_valid(u32 reg)
{
@@ -2725,6 +3288,9 @@ static bool evergreen_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
+ case CP_COHER_CNTL:
+ case CP_COHER_SIZE:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_GS_VERTEX_REUSE:
@@ -2840,6 +3406,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2914,6 +3481,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem-to-mem copies require a dw-aligned count */
+ if ((command & 0x1fffff) % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
return -EINVAL;
}
@@ -2955,3 +3580,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
return ret;
}
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib: radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ u32 idx = 0;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+
+ do {
+ header = ib->ptr[idx];
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ if (tiled)
+ idx += count + 7;
+ else
+ idx += count + 3;
+ break;
+ case DMA_PACKET_COPY:
+ if (tiled) {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (idx < ib->length_dw);
+
+ return 0;
+}
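
A quick decode sketch for the GET_DMA_* macros shared by both parsers above, fed a hypothetical COPY header; the field layout is copied verbatim from the file:

#include <stdint.h>
#include <stdio.h>

/* Same field layout as the GET_DMA_* macros in evergreen_cs.c above */
#define GET_DMA_CMD(h)   (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
#define GET_DMA_T(h)     (((h) & 0x00800000) >> 23)
#define GET_DMA_NEW(h)   (((h) & 0x04000000) >> 26)
#define GET_DMA_MISC(h)  (((h) & 0x0700000) >> 20)

int main(void)
{
	/* hypothetical header: COPY (0x3), new-style, linear, misc=0, 256 dw */
	uint32_t header = (0x3u << 28) | (1u << 26) | 256;

	printf("cmd=%u count=%u tiled=%u new=%u misc=%u\n",
	       GET_DMA_CMD(header), GET_DMA_COUNT(header),
	       GET_DMA_T(header), GET_DMA_NEW(header), GET_DMA_MISC(header));
	return 0;
}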
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index df542f1a5dfb..cb9baaac9e85 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
/* Registers */
@@ -91,6 +93,10 @@
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
+#define CP_STRMOUT_CNTL 0x84FC
+
+#define CP_COHER_CNTL 0x85F0
+#define CP_COHER_SIZE 0x85F4
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
@@ -351,6 +357,54 @@
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7138
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x5e78
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -647,6 +701,7 @@
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT1_CNTL2 0x1434
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
@@ -668,6 +723,8 @@
#define CACHE_UPDATE_MODE(x) ((x) << 6)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
#define WAIT_UNTIL 0x8040
@@ -850,6 +907,37 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* ASYNC DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xD0B8
+
+#define CAYMAN_DMA1_CNTL 0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
@@ -947,6 +1035,53 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
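
For reference, a sketch of building a DMA ring header with the DMA_PACKET() macro just added above; the expected encoding for a 0x1000-dword linear copy is 0x30001000:

#include <stdint.h>
#include <stdio.h>

/* Same encoding as the DMA_PACKET() macro in evergreend.h above */
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
				  (((t) & 0x1) << 23)  | \
				  (((s) & 0x1) << 22)  | \
				  (((n) & 0xFFFFF) << 0))
#define DMA_PACKET_COPY 0x3

int main(void)
{
	/* linear (t=0), no swap (s=0) copy of 0x1000 dwords */
	uint32_t header = DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0x1000);

	printf("copy header: 0x%08x\n", header); /* prints 0x30001000 */
	return 0;
}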
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 81e6a568c29d..39e8be1d1e89 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-7 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
- WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
@@ -1118,6 +1131,181 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
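
One detail of cayman_dma_resume above worth calling out: ring->ring_size is in bytes, the RB size field is the log2 of the size in dwords, and the rptr/wptr registers hold byte offsets while the driver tracks dwords, hence the `<< 2` / `>> 2`. A sketch with a stand-in for drm_order(), assuming a power-of-two ring size as radeon allocates:

#include <stdint.h>
#include <stdio.h>

/* log2 helper standing in for drm_order(); assumes v is a power of two */
static unsigned int order_base_2(unsigned long v)
{
	unsigned int o = 0;

	while ((1ul << o) < v)
		o++;
	return o;
}

int main(void)
{
	unsigned long ring_size = 64 * 1024;             /* bytes, as in cayman_init */
	unsigned int rb_bufsz = order_base_2(ring_size / 4); /* size field is in dwords */
	uint32_t rb_cntl = rb_bufsz << 1;

	/* ring->wptr counts dwords; the DMA_RB_WPTR register holds bytes */
	uint32_t wptr_dw = 10;

	printf("rb_bufsz=%u rb_cntl=0x%x wptr reg=0x%x\n",
	       rb_bufsz, rb_cntl, wptr_dw << 2);
	return 0;
}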
@@ -1208,6 +1396,32 @@ int cayman_asic_reset(struct radeon_device *rdev)
return cayman_gpu_soft_reset(rdev);
}
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ else
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -1289,6 +1503,18 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1303,6 +1529,23 @@ static int cayman_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = cayman_cp_load_microcode(rdev);
if (r)
return r;
@@ -1310,6 +1553,10 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1601,7 @@ int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
cayman_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ cayman_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1668,14 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1433,6 +1688,7 @@ int cayman_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1463,6 +1719,7 @@ void cayman_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1538,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
}
-
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
}
}
}
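
The DMA branch caps ndw at 0xFFFFE because the WRITE packet's count field is 20 bits and each PTE takes two dwords. A hedged arithmetic sketch of how a large update splits into packets (the PTE count is made up):

#include <stdio.h>

int main(void)
{
	unsigned count = 1000000;	/* hypothetical number of PTEs */

	while (count) {
		unsigned ndw = count * 2;	/* 2 dwords per PTE */

		if (ndw > 0xFFFFE)		/* 20-bit count, kept even */
			ndw = 0xFFFFE;
		printf("one WRITE packet carries %u PTEs\n", ndw / 2);
		count -= ndw / 2;
	}
	return 0;
}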
@@ -1596,3 +1880,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907a..b93186b8ee4b 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -80,7 +98,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -588,5 +617,62 @@
#define PACKET3_SET_APPEND_CNT 0x75
#define PACKET3_ME_WRITE 0x7A
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_MODE 0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
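To see what these macros produce, a hedged standalone sketch expanding two headers used elsewhere in this patch (nothing here is driver API; the macro is copied from above):

#include <stdio.h>

#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |	\
				  (((t) & 0x1) << 23) |		\
				  (((s) & 0x1) << 22) |		\
				  (((n) & 0xFFFFF) << 0))

int main(void)
{
	/* the one-dword WRITE emitted by r600_dma_ring_test() */
	printf("WRITE n=1:  0x%08X\n", DMA_PACKET(0x2u, 0, 0, 1)); /* 0x20000001 */
	/* the SRBM write emitted by cayman_dma_vm_flush() */
	printf("SRBM_WRITE: 0x%08X\n", DMA_PACKET(0x9u, 0, 0, 0)); /* 0x90000000 */
	return 0;
}
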
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd2..8ff7cac222dc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
}
}
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 169ecc9628ea..2aaf147969bd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ dma_status_reg = RREG32(DMA_STATUS_REG);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
@@ -1588,6 +1611,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+ WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1865,6 +1889,7 @@ void r600_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
int r600_init_microcode(struct radeon_device *rdev)
@@ -2190,6 +2215,128 @@ void r600_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
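
A minimal sketch of that programming model, mirroring the packet sequence r600_dma_ring_test() uses below; the helper name and the pre-validated gpu_addr are assumptions, not driver API:

static int r600_dma_write_one_dword(struct radeon_device *rdev,
				    u64 gpu_addr, u32 value)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	int r = radeon_ring_lock(rdev, ring, 4);	/* 4-dword packet */

	if (r)
		return r;
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, gpu_addr & 0xfffffffc);	/* dst low, aligned */
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff); /* dst high */
	radeon_ring_write(ring, value);			/* payload */
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}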
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
/*
* GPU scratch registers helpers function.
@@ -2246,6 +2393,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -2309,6 +2514,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
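
The emit_wait flag only toggles the `s` bit of the header; a hedged sketch showing the two encodings side by side (macro copied from r600d.h so the example builds standalone):

#include <stdio.h>

#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |	\
				  (((t) & 0x1) << 23) |		\
				  (((s) & 0x1) << 22) |		\
				  (((n) & 0xFFFF) << 0))

int main(void)
{
	/* s = emit_wait ? 0 : 1 in r600_dma_semaphore_ring_emit() */
	printf("wait:   0x%08X\n", DMA_PACKET(0x5u, 0, 0, 0)); /* 0x50000000 */
	printf("signal: 0x%08X\n", DMA_PACKET(0x5u, 0, 1, 0)); /* 0x50400000 */
	return 0;
}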
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2328,6 +2586,80 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx-r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
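
Each COPY packet moves at most 0xFFFF dwords, hence the num_loops split above. A hedged worked example with a hypothetical 1 MiB transfer, assuming the usual 4 KiB GPU page:

#include <stdio.h>

#define GPU_PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_gpu_pages = 256;		/* 1 MiB */
	unsigned size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);

	/* 262144 dwords -> 5 COPY packets (4 full + 1 short) */
	printf("%u dwords in %u loops\n", size_in_dw, num_loops);
	return 0;
}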
+
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2343,7 +2675,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
static int r600_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -2388,6 +2720,12 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2397,12 +2735,20 @@ static int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
+
r = r600_cp_load_microcode(rdev);
if (r)
return r;
@@ -2410,6 +2756,10 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2465,7 +2815,7 @@ int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2538,6 +2888,9 @@ int r600_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2550,6 +2903,7 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2566,6 +2920,7 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2668,6 +3023,104 @@ free_scratch:
return r;
}
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
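
The padding rule above works because the INDIRECT_BUFFER packet is three dwords long: padding until wptr % 8 == 5 makes the packet end exactly on an 8-dword boundary. A hedged standalone check:

#include <stdio.h>

int main(void)
{
	unsigned wptr = 17;	/* arbitrary example write pointer */
	unsigned pad = 0;

	while (((wptr + pad) & 7) != 5)	/* NOPs until wptr % 8 == 5 */
		pad++;
	/* 21 + 3 = 24, a multiple of 8 */
	printf("pad %u NOPs, packet ends at %u\n", pad, wptr + pad + 3);
	return 0;
}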
+
/*
* Interrupts
*
@@ -2859,6 +3312,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3000,6 +3455,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
+ u32 dma_cntl;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,12 +3490,19 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
+
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3084,6 +3547,7 @@ int r600_irq_set(struct radeon_device *rdev)
}
WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3463,6 +3927,10 @@ restart_ih:
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe0..0be768be530c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
- /* htile widht & nby (8 or 4) make 2 bits number */
- tmp = track->htile_surface & 3;
+ /* always assume 8x8 htile */
/* align is htile align * 8, htile align vary according to
* number of pipe and tile width and nby
*/
switch (track->npipes) {
case 8:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 64 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
break;
case 4:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 2:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 1:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 8 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
break;
default:
dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
}
}
/* compute number of htile */
- nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
- nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_HTILE_SURFACE:
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2496,3 +2514,196 @@ void r600_cs_legacy_init(void)
{
r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
+
+/*
+ * DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ unsigned idx;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ idx = p->dma_reloc_idx;
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ *cs_reloc = p->relocs_ptr[idx];
+ p->dma_reloc_idx++;
+ return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+
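A hedged decode of one hypothetical header through these accessors (the value is invented for illustration):

#include <stdio.h>

#define GET_DMA_CMD(h)   (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h)     (((h) & 0x00800000) >> 23)

int main(void)
{
	unsigned header = 0x20800010;	/* tiled WRITE of 16 dwords */

	printf("cmd 0x%x, count %u, tiled %u\n",
	       GET_DMA_CMD(header), GET_DMA_COUNT(header),
	       GET_DMA_T(header));	/* cmd 0x2, count 16, tiled 1 */
	return 0;
}
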
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ u32 header, cmd, count, tiled;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 5;
+ } else {
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+5];
+ dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+5];
+ src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 7;
+ } else {
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("Constant Fill is 7xx only !\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
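
For reference, a hedged sketch of the smallest IB this parser accepts: a linear one-dword DMA_PACKET_WRITE. The offsets are pre-relocation placeholders; the parser patches in the bo's GPU offset and advances p->idx by count + 3 = 4 dwords:

static const unsigned minimal_dma_ib[] = {
	0x20000001,	/* WRITE, tiled = 0, count = 1 */
	0x00000000,	/* dst address low, dword aligned */
	0x00000000,	/* dst address bits [39:32] */
	0xdeadbeef,	/* the dword to write */
};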
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18a..909219b1bf80 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
#define R600_CONFIG_F0_BASE 0x542C
#define R600_CONFIG_APER_SIZE 0x5430
+#define R600_BIF_FB_EN 0x5490
+#define R600_FB_READ_EN (1 << 0)
+#define R600_FB_WRITE_EN (1 << 1)
+
+#define R600_CITF_CNTL 0x200c
+#define R600_BLACKOUT_MASK 0x00000003
+
+#define R700_MC_CITF_CNTL 0x25c0
+
#define R600_ROM_CNTL 0x1600
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba9..4a53402b1852 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
+/* async DMA */
+#define DMA_TILING_CONFIG 0x3ec4
+#define DMA_CONFIG 0x3e4c
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_MODE 0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
+#define DMA_PACKET_NOP 0xf
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
-# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
+# define RV770_SOFT_RESET_DMA (1 << 20)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
@@ -1134,6 +1186,38 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e26..5dc744d43d12 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 5
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
@@ -122,6 +122,11 @@ extern int radeon_lockup_timeout;
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX 3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -313,6 +318,7 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 placements[3];
+ u32 busy_placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -787,6 +793,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
/*
* CS.
*/
@@ -824,6 +839,7 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ unsigned dma_reloc_idx;
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
@@ -883,7 +899,9 @@ struct radeon_wb {
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_EVENT_OFFSET 3072
/**
@@ -1539,6 +1557,8 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
+ /* protects concurrent MM_INDEX/DATA based register access */
+ spinlock_t mmio_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -1614,8 +1634,10 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -1631,9 +1653,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1682,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
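
A hedged usage sketch of the new _IDX accessors: they force the MM_INDEX/MM_DATA path, now serialized by mmio_idx_lock, even for registers inside the directly mapped window. The register names are illustrative only:

u32 direct = RREG32(GRBM_STATUS);	/* plain readl() on the BAR */
u32 indirect = RREG32_IDX(GRBM_STATUS);	/* via MM_INDEX/MM_DATA, locked */
WREG32_IDX(SCRATCH_REG0, 0xCAFEDEAD);	/* indirect write, locked */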
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 10ea17a6b2a6..42433344cb1b 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
PCI_VENDOR_ID_DELL, 0x00e3, 2},
- /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
PCI_VENDOR_ID_DELL, 0x0149, 1},
+ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_IBM, 0x0531, 1},
/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
0x1025, 0x0061, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab7..596bcbe80ed0 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
},
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
}
},
.irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = NULL,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &si_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &si_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
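Note: the copy-table changes above route buffer moves through the new async DMA engine wherever one is wired up, keeping the CP blit as a fallback. A minimal sketch of how a caller might consume this table; the helper name is hypothetical (the driver itself goes through accessor macros such as radeon_copy_dma()/radeon_copy_blit(), as seen in radeon_test.c further down):

/* Hypothetical helper mirroring how the per-ASIC copy table is consumed. */
static int radeon_best_effort_copy(struct radeon_device *rdev,
				   uint64_t src, uint64_t dst,
				   unsigned num_gpu_pages,
				   struct radeon_fence **fence)
{
	if (rdev->asic->copy.dma)	/* async DMA engine, if present */
		return rdev->asic->copy.dma(rdev, src, dst,
					    num_gpu_pages, fence);
	if (rdev->asic->copy.blit)	/* fall back to the CP blit path */
		return rdev->asic->copy.blit(rdev, src, dst,
					     num_gpu_pages, fence);
	return -ENODEV;
}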
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be1..5f4882cc2152 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
struct rv515_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
+ bool crtc_enabled[2];
};
int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +444,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* cayman
@@ -449,6 +473,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +505,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cfc..4af89126e223 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
while (ram--) {
addr = ram * 1024 * 1024;
/* write to each page */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- WREG32(RADEON_MM_DATA, 0xdeadbeef);
+ WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
/* read back and verify */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+ if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
return 0;
}
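Note: the hunk above replaces open-coded RADEON_MM_INDEX/RADEON_MM_DATA pairs with WREG32_IDX/RREG32_IDX helpers. A sketch of the idiom those helpers are assumed to implement, serialized by the rdev->mmio_idx_lock that this patch initializes in radeon_device.c below (the real helpers may differ in detail):

/* Indexed MMIO read: write the offset to the index register, then
 * move data through the data register, holding the lock so two users
 * cannot interleave their index/data pairs. */
static u32 rreg32_idx_sketch(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	WREG32(RADEON_MM_INDEX, reg);
	val = RREG32(RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
	return val;
}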
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b884c362a8c2..47bf162ab9c6 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
break;
@@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = false;
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
*/
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
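Note: the connector changes above are a mechanical rename following the drm core: properties now attach to the embedded drm_mode_object, hence the extra .base level on the radeon connector wrapper. For a plain struct drm_connector the call shape is assumed to be:

/* Sketch only: attach a property to a generic connector after the
 * rename; &connector->base is the embedded struct drm_mode_object. */
drm_object_attach_property(&connector->base,
			   dev->mode_config.scaling_mode_property,
			   DRM_MODE_SCALE_FULLSCREEN);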
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b64..9143fc45e35b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
}
}
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
- u32 ret;
-
- if (addr < 0x10000)
- ret = DRM_READ32(dev_priv->mmio, addr);
- else {
- DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
- ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
- }
-
- return ret;
-}
-
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563fb..396baba0141a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return 0;
}
chunk = &p->chunks[p->chunk_relocs_idx];
+ p->dma_reloc_idx = 0;
/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
} else
p->ring = RADEON_RING_TYPE_GFX_INDEX;
break;
+ case RADEON_CS_RING_DMA:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (p->priority > 0)
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ else
+ p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+ } else if (p->rdev->family >= CHIP_R600) {
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ } else {
+ return -EINVAL;
+ }
+ break;
}
return 0;
}
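Note: a compact restatement of the ring-selection policy added above, useful when reasoning about which engine a CS submitted with RADEON_CS_RING_DMA lands on (same logic, hypothetical helper name):

static int radeon_dma_ring_for_family(enum radeon_family family, s32 priority)
{
	if (family >= CHIP_CAYMAN)	/* two async DMA engines */
		return priority > 0 ? R600_RING_TYPE_DMA_INDEX
				    : CAYMAN_RING_TYPE_DMA1_INDEX;
	if (family >= CHIP_R600)	/* single DMA engine */
		return R600_RING_TYPE_DMA_INDEX;
	return -EINVAL;			/* pre-R600: no async DMA ring */
}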
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64bd..ad6df625e8b8 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ u32 reg;
switch (radeon_crtc->crtc_id) {
case 0:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ reg = RADEON_CRTC_GEN_CNTL;
break;
case 1:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ reg = RADEON_CRTC2_GEN_CNTL;
break;
default:
return;
}
- WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+ WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c374..49b06590001e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
+ spin_lock_init(&rdev->mmio_idx_lock);
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a6015727..310c0e5254ba 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
work->old_rbo = rbo;
obj = new_radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&rbo->tbo.bdev->fence_lock);
if (rbo->tbo.sync_obj)
work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
+
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8c1a83c6eb07..9b1a727d3c9e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,12 @@
* 2.22.0 - r600 only: RESOLVE_BOX allowed
* 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
* 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * 2.25.0 - eg+: new info request for num SE and num SH
+ * 2.26.0 - r600-eg: fix htile size computation
+ * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 24
+#define KMS_DRIVER_MINOR 27
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d01..e7fdf163a8ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c2740..410a975a8eec 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -772,7 +772,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
int r;
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- if (rdev->wb.use_event) {
+ if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8690be757d80..6e24f84755b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
{
struct radeon_bo_va *bo_va;
- BUG_ON(!radeon_bo_is_reserved(bo));
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96b..9c312f9afb68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_MAX_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_shader_engines;
+ else if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.num_ses;
+ else
+ value = 1;
+ break;
+ case RADEON_INFO_MAX_SH_PER_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_sh_per_se;
+ else
+ return -EINVAL;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
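Note: a hedged userspace sketch of querying the new info keys through the existing RADEON_INFO ioctl; the struct layout is assumed from drm/radeon_drm.h, where .value carries a user pointer the kernel copies the 32-bit result into:

#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int query_max_se(int fd, uint32_t *out)
{
	struct drm_radeon_info info = {
		.request = RADEON_INFO_MAX_SE,
		.value = (uintptr_t)out,	/* result written here */
	};
	return drmCommandWriteRead(fd, DRM_RADEON_INFO,
				   &info, sizeof(info));
}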
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7c4b4bb05a36..883c95d8d90f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
- rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (domain & RADEON_GEM_DOMAIN_GTT)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- if (domain & RADEON_GEM_DOMAIN_CPU)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+ }
+ }
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
+
+ c = 0;
+ rbo->placement.busy_placement = rbo->busy_placements;
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
rbo->placement.num_busy_placement = c;
}
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
- u32 domain;
int r;
r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
- domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
-
- retry:
- radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false, false);
+ true, false);
if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
- domain |= RADEON_GEM_DOMAIN_GTT;
- goto retry;
- }
return r;
}
}
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
- BUG_ON(!radeon_bo_is_reserved(bo));
+ BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah, the memory is not visible! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r != 0))
return r;
offset = bo->mem.start << PAGE_SHIFT;
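Note: the placement rework above pairs with the removal of the hand-rolled VRAM->GTT retry loop in radeon_bo_list_validate: every BO now advertises a GTT busy placement, so TTM itself falls back when VRAM is contended. Net effect as a sketch, using the radeon_bo fields shown in this hunk:

static void radeon_sketch_busy_fallback(struct radeon_bo *rbo)
{
	/* One GTT busy placement; cache mode depends on AGP vs PCIe. */
	rbo->placement.busy_placement = rbo->busy_placements;
	rbo->busy_placements[0] = (rbo->rdev->flags & RADEON_IS_AGP) ?
		(TTM_PL_FLAG_WC | TTM_PL_FLAG_TT) :
		(TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT);
	rbo->placement.num_busy_placement = 1;
}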
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba2..fda09c9ea689 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
#include "radeon_reg.h"
#include "radeon.h"
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA 0
+
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
struct radeon_bo *vram_obj = NULL;
struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
- int r;
+ int r, ring;
+
+ switch (flag) {
+ case RADEON_TEST_COPY_DMA:
+ ring = radeon_copy_dma_ring_index(rdev);
+ break;
+ case RADEON_TEST_COPY_BLIT:
+ ring = radeon_copy_blit_ring_index(rdev);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ return;
+ }
size = 1024 * 1024;
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
}
}
+void radeon_test_moves(struct radeon_device *rdev)
+{
+ if (rdev->asic->copy.dma)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+ if (rdev->asic->copy.blit)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 563c8edcb03b..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
@@ -266,14 +266,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
&fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
- evict, no_wait_reserve, no_wait_gpu, new_mem);
+ evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -320,7 +320,7 @@ out_cleanup:
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -359,7 +360,7 @@ out_cleanup:
static int radeon_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
return r;
}
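Note: all the radeon_ttm.c churn above adapts to a TTM API change that dropped the no_wait_reserve argument. Based on the call sites in this file, the validate entry point is assumed to now read:

/* int ttm_bo_validate(struct ttm_buffer_object *bo,
 *                     struct ttm_placement *placement,
 *                     bool interruptible, bool no_wait_gpu);
 * i.e. reservation waiting is no longer a per-call option here. */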
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b24..2bb6d0e84b3d 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
void rv515_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- /* Stop all video */
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ /* disable VGA render */
WREG32(R_000300_VGA_RENDER_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, 0);
- WREG32(R_006880_D2CRTC_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
- WREG32(R_000330_D1VGA_CONTROL, 0);
- WREG32(R_000338_D2VGA_CONTROL, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+
+ radeon_mc_wait_for_idle(rdev);
+
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->family >= CHIP_RV770)
+ blackout = RREG32(R700_MC_CITF_CNTL);
+ else
+ blackout = RREG32(R600_CITF_CNTL);
+ if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+ /* Block CPU access */
+ WREG32(R600_BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout |= R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, blackout);
+ else
+ WREG32(R600_CITF_CNTL, blackout);
+ }
+ }
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
- /* Unlock host access */
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->family >= CHIP_RV770) {
+ if (i == 1) {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ } else {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ }
+ }
+ WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+ WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+ if (rdev->family >= CHIP_R600) {
+ /* unblackout the MC */
+ if (rdev->family >= CHIP_RV770)
+ tmp = RREG32(R700_MC_CITF_CNTL);
+ else
+ tmp = RREG32(R600_CITF_CNTL);
+ tmp &= ~R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, tmp);
+ else
+ WREG32(R600_CITF_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+ }
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock VGA access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
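Note: the blank/unblank paths above repeat one idiom: after toggling AVIVO_CRTC_DISP_READ_REQUEST_DISABLE, spin until the vblank counter advances so the change lands on a frame boundary. A hypothetical helper with the same shape, built only from calls used in this hunk:

static void radeon_wait_for_next_frame(struct radeon_device *rdev, int crtc)
{
	u32 frame_count = radeon_get_vblank_counter(rdev, crtc);
	int j;

	for (j = 0; j < rdev->usec_timeout; j++) {
		if (radeon_get_vblank_counter(rdev, crtc) != frame_count)
			break;		/* counter moved: new frame began */
		udelay(1);
	}
}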
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e5..87c979c4f721 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = rv770_cp_load_microcode(rdev);
if (r)
return r;
@@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d75..20e29d23d348 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
+#define DMA_TILING_CONFIG 0x3ec8
+#define DMA_TILING_CONFIG2 0xd0b8
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -358,6 +361,26 @@
#define WAIT_UNTIL 0x8040
+/* async DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+
#define SRBM_STATUS 0x0E50
/* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
#define HDMI_OFFSET0 (0x7400 - 0x7400)
#define HDMI_OFFSET1 (0x7800 - 0x7400)
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x7300
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
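Note: a worked example of the rv770 DMA packet encoding defined above; the SI variant used later in this patch (si.c) takes an extra field and comes from sid.h:

/* COPY header for a 0x40-dword payload, tiling (t) and semaphore (s)
 * bits clear: (0x3 << 28) | 0x40. */
u32 hdr = DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0x40);
/* hdr == 0x30000040 */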
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index b0db712060fb..7e835d94df4a 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
si_tiling_mode_table_init(rdev);
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
udelay(50);
}
@@ -2426,9 +2431,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-15 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
si_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2474,6 +2490,7 @@ static bool si_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_ESGS_RING_SIZE:
@@ -2533,6 +2550,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2632,6 +2650,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -2808,30 +2872,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- uint64_t value;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- addr += incr;
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ /* DMA */
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+ radeon_ring_write(ring, pe); /* dst addr */
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ radeon_ring_write(ring, r600_flags); /* mask */
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, value); /* value */
+ radeon_ring_write(ring, upper_32_bits(value));
+ radeon_ring_write(ring, incr); /* increment size */
+ radeon_ring_write(ring, 0);
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
}
}
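Note: the rewritten si_vm_set_page now has three encodings: CP WRITE_DATA on the GFX ring, scattered DMA WRITE packets for system pages, and a single fixed-size PTE_PDE packet for contiguous VRAM runs. Both DMA paths cap ndw at 0xFFFFE, i.e. 0x7FFFF entries per packet; a sketch of the resulting packet count:

static unsigned si_dma_pte_packets_needed(unsigned count)
{
	/* 2 dwords per PTE for WRITE; base + incr for PTE_PDE —
	 * either way at most 0x7FFFF entries fit in one packet. */
	return (count + 0x7FFFE) / 0x7FFFF;
}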
@@ -2879,6 +2999,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts 0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
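
[Editor's note] Each SRBM write above is a fixed three-dword sequence:
header, address dword, data. A hedged sketch of that sequence as a helper;
the function name is hypothetical, built only from the macros this patch
adds to sid.h:

/* Hypothetical helper: one SRBM register write on an async-DMA ring.
 * DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0) expands to 0x9 << 28,
 * i.e. 0x90000000; the address dword packs the byte-enable mask (0xf,
 * all four bytes) into bits 19:16 and the register's dword offset into
 * the low bits.
 */
static void si_dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
	radeon_ring_write(ring, val);
}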
/*
* RLC
*/
@@ -3047,6 +3193,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
+ tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3166,6 +3316,7 @@ int si_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 dma_cntl, dma_cntl1;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3186,6 +3337,9 @@ int si_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3199,6 +3353,15 @@ int si_irq_set(struct radeon_device *rdev)
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3258,6 +3421,9 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
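
[Editor's note] The DMA_CNTL updates here and in
si_disable_interrupt_state() share one read-modify-write shape. A purely
illustrative factoring, not in the patch:

/* Illustrative only: toggle the trap interrupt for one DMA engine. */
static void si_dma_set_trap(struct radeon_device *rdev, u32 reg_offset,
			    bool enable)
{
	u32 tmp = RREG32(DMA_CNTL + reg_offset) & ~TRAP_ENABLE;

	if (enable)
		tmp |= TRAP_ENABLE;
	WREG32(DMA_CNTL + reg_offset, tmp);
}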
@@ -3683,6 +3849,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
@@ -3706,9 +3882,17 @@ restart_ih:
break;
}
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA1 trap event */
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3732,6 +3916,80 @@ restart_ih:
return IRQ_HANDLED;
}
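
[Editor's note] The new IH cases split into two groups: 146/147 report VM
protection faults (logged, then acked through VM_CONTEXT1_CNTL2), while
224 and 244 are the traps raised by DMA_PACKET_TRAP on the two async DMA
engines, each feeding its own fence ring. A hypothetical helper expressing
that mapping, for illustration only:

/* Hypothetical: map a DMA trap source id to the ring it fences. */
static int si_dma_trap_ring(unsigned src_id)
{
	switch (src_id) {
	case 224:
		return R600_RING_TYPE_DMA_INDEX;
	case 244:
		return CAYMAN_RING_TYPE_DMA1_INDEX;
	default:
		return -EINVAL;
	}
}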
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
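
[Editor's note] Worked sizing example: copying 8 MiB (2048 GPU pages of
4 KiB, 0x800000 bytes) needs num_loops = DIV_ROUND_UP(0x800000, 0xfffff)
= 9 copy packets of 5 dwords each, so the ring is locked for
9 * 5 + 11 = 56 dwords; the extra 11 dwords cover the semaphore sync and
the fence emission.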
/*
* startup/shutdown callbacks
*/
@@ -3803,6 +4061,18 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
@@ -3833,6 +4103,22 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
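
[Editor's note] A hedged reading of the trailing radeon_ring_init()
arguments, assuming the 3.7-era prototype:

/* Sketch of the argument mapping (assumption, not from this patch):
 *   rptr_offs     - scratch/writeback slot for the read pointer
 *   rptr_reg      - DMA_RB_RPTR + per-engine register offset
 *   wptr_reg      - DMA_RB_WPTR + per-engine register offset
 *   ptr_reg_shift - 2: the ring registers are dword-granular
 *   ptr_reg_mask  - 0x3fffc: wraps the pointer within the ring
 *   nop           - the async-DMA NOP packet used for padding
 */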
r = si_cp_load_microcode(rdev);
if (r)
return r;
@@ -3840,6 +4126,10 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3881,9 +4171,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
si_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ cayman_dma_stop(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -3961,6 +4249,14 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3973,6 +4269,7 @@ int si_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
@@ -4001,6 +4298,7 @@ void si_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
#endif
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7d2a20e56577..62b46215d423 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -91,7 +91,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -104,6 +115,9 @@
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
@@ -424,6 +438,7 @@
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
+#define CP_STRMOUT_CNTL 0x84FC
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
@@ -834,6 +849,54 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_DEST_BASE_0_ENA (1 << 0)
@@ -921,4 +984,61 @@
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
#define PACKET3_SWITCH_BUFFER 0x8B
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
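
[Editor's note] Worked expansion of the packing macro above:

/* Header for a 16-dword linear async-DMA write:
 *
 *   DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 16)
 *     = (0x2 << 28) | (0 << 26) | (0 << 23) | (0 << 22) | 16
 *     = 0x20000010
 *
 * The command sits in bits 31:28, three packet-specific mode bits at
 * 26/23/22, and a 20-bit dword count in bits 19:0, which is where the
 * 0xFFFFE clamps in si.c come from.
 */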
+
#endif
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0c..d917a411ca85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
connector->encoder = encoder;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 53b98520ebd6..074410371e2a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -218,7 +218,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
}
bpp = crtc->fb->bits_per_pixel / 8;
- win.stride = win.outw * bpp;
+ win.stride = crtc->fb->pitches[0];
/* program window registers */
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -818,6 +818,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
}
static struct of_device_id tegra_dc_of_match[] = {
+ { .compatible = "nvidia,tegra30-dc", },
{ .compatible = "nvidia,tegra20-dc", },
{ },
};
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 58f55dc6d59d..ab4016412bbf 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1318,8 +1318,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
}
static struct of_device_id tegra_hdmi_of_match[] = {
- { .compatible = "nvidia,tegra20-hdmi", },
{ .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra20-hdmi", },
{ },
};
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
index 1f728cd7f61a..bdb97a564d82 100644
--- a/drivers/gpu/drm/tegra/host1x.c
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -68,6 +68,8 @@ static int host1x_parse_dt(struct host1x *host1x)
static const char * const compat[] = {
"nvidia,tegra20-dc",
"nvidia,tegra20-hdmi",
+ "nvidia,tegra30-dc",
+ "nvidia,tegra30-hdmi",
};
unsigned int i;
int err;
@@ -268,6 +270,7 @@ int host1x_unregister_client(struct host1x *host1x,
}
static struct of_device_id tegra_host1x_of_match[] = {
+ { .compatible = "nvidia,tegra30-host1x", },
{ .compatible = "nvidia,tegra20-host1x", },
{ },
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7426fe59108e..a9151337d5b9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -366,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -420,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, mem);
+ no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -488,39 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
/*
- * Make processes trying to reserve really pick it up.
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
*/
- smp_mb__after_atomic_dec();
- wake_up_all(&bo->event_queue);
+ smp_mb__before_atomic_dec();
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver;
+ struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;
int put_count;
int ret;
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
- if (!bo->sync_obj) {
-
- spin_lock(&glob->lru_lock);
-
- /**
- * Lock inversion between bo:reserve and bdev::fence_lock here,
- * but that's OK, since we're only trylocking.
- */
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY))
- goto queue;
-
+ if (!ret && !bo->sync_obj) {
spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
@@ -530,18 +524,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_list_ref_sub(bo, put_count, true);
return;
- } else {
- spin_lock(&glob->lru_lock);
}
-queue:
- driver = bdev->driver;
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
+
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bdev->fence_lock);
if (sync_obj) {
driver->sync_obj_flush(sync_obj);
@@ -552,68 +547,84 @@ queue:
}
/**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
* @interruptible Any sleeps should occur interruptibly.
- * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
*/
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_reserve,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
- int ret = 0;
+ int ret;
-retry:
spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
- if (unlikely(ret != 0))
- return ret;
+ if (ret && !no_wait_gpu) {
+ void *sync_obj;
-retry_reserve:
- spin_lock(&glob->lru_lock);
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(list_empty(&bo->ddestroy))) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- return 0;
- }
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
- if (unlikely(ret != 0))
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
return ret;
- goto retry_reserve;
- }
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ spin_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
- BUG_ON(ret != 0);
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- /**
- * We can re-check for sync object without taking
- * the bo::lock since setting the sync object requires
- * also bo::reserved. A busy object at this point may
- * be caused by another thread recently starting an accelerated
- * eviction.
- */
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+ * and is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if it's not the case, because we finished waiting any
+ * delayed destruction would succeed, so just return success
+ * here.
+ */
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ } else
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(bo->sync_obj)) {
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
atomic_set(&bo->reserved, 0);
wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -656,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
- !remove_all);
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ if (!ret)
+ ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+ !remove_all);
+ else
+ spin_unlock(&glob->lru_lock);
+
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
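
[Editor's note] The crux of this hunk is a calling-convention change:
ttm_bo_cleanup_refs_and_unlock() is entered with both the LRU lock and
the buffer's reservation held, and drops both itself, so callers only
unlock on the reserve-failure path. The shape shared by the call sites
in this patch:

/* Sketch of the new convention: */
spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (!ret)
	/* drops both the lru_lock and the reservation */
	ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
else
	spin_unlock(&glob->lru_lock);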
@@ -696,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
drm_mm_put_block(bo->vm_node);
@@ -707,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
- write_lock(&bdev->vm_lock);
}
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
- struct ttm_bo_device *bdev = bo->bdev;
*p_bo = NULL;
- write_lock(&bdev->vm_lock);
kref_put(&bo->kref, ttm_bo_release);
- write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -737,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
@@ -768,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -779,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
@@ -793,49 +805,33 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
- int ret, put_count = 0;
+ int ret = -EBUSY, put_count;
-retry:
spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
+ list_for_each_entry(bo, &man->lru, lru) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
}
- bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (ret) {
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(bo, interruptible,
- no_wait_reserve, no_wait_gpu);
- kref_put(&bo->list_kref, ttm_bo_release_list);
-
return ret;
}
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
+ kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+ no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
-
- /**
- * We *need* to retry after releasing the lru lock.
- */
-
- if (unlikely(ret != 0))
- return ret;
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -845,7 +841,7 @@ retry:
ttm_bo_list_ref_sub(bo, put_count, true);
- ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
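
[Editor's note] The ordering is inverted relative to the old code: the
buffer is reserved first and only then checked for pending delayed
destruction. Because ttm_bo_cleanup_refs_and_unlock() consumes both the
reservation and the LRU lock, the old retry loop disappears, and -EBUSY
falls out naturally when nothing on the LRU can be trylocked.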
@@ -870,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
- bool no_wait_reserve,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -883,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_mem_evict_first(bdev, mem_type,
+ interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -949,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1040,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
@@ -1055,7 +1050,7 @@ EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret = 0;
@@ -1082,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, placement, &mem,
+ interruptible, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false,
+ interruptible, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
@@ -1114,7 +1111,7 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret;
@@ -1130,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1243,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (ret)
goto out_err;
@@ -1329,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1797,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
- while (ret == -EBUSY) {
- if (unlikely(list_empty(&glob->swap_lru))) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
- bo = list_first_entry(&glob->swap_lru,
- struct ttm_buffer_object, swap);
- kref_get(&bo->list_kref);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
- if (!list_empty(&bo->ddestroy)) {
- spin_unlock(&glob->lru_lock);
- (void) ttm_bo_cleanup_refs(bo, false, false, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- continue;
- }
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return ret;
+ }
- /**
- * Reserve buffer. Since we unlock while sleeping, we need
- * to re-check that nobody removed us from the swap-list while
- * we slept.
- */
+ kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait_unreserved(bo, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- }
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
}
- BUG_ON(ret != 0);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
@@ -1856,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false, false);
+ false, false);
if (unlikely(ret != 0))
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index b9c4e515b1d8..9e9c5d2a5c74 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -611,7 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4bd..74705f329d99 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
(vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
+ if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+ bo = NULL;
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
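
[Editor's note] kref_get_unless_zero() is the point of this hunk: the
lookup holds only vm_lock, not a reference, so a plain ttm_bo_reference()
could resurrect a buffer whose final reference was dropped concurrently.
The pattern in isolation (sketch; npages stands in for the page count
computed from the vma):

bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, npages);
if (bo && !kref_get_unless_zero(&bo->kref))
	bo = NULL;	/* raced with final release: treat as not found */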
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1986d006c264..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -213,8 +213,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
driver = bdev->driver;
glob = bo->glob;
- spin_lock(&bdev->fence_lock);
spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
@@ -223,8 +223,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
ttm_bo_unreserve_locked(bo);
entry->reserved = false;
}
- spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index f18eeb458139..58a5f3261c0b 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -157,11 +157,11 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->refcount_release = refcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
- spin_lock(&tdev->object_lock);
kref_init(&base->refcount);
- ret = drm_ht_just_insert_please(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
+ spin_lock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
out_err0:
return ret;
}
@@ -187,8 +189,15 @@ static void ttm_release_base(struct kref *kref)
struct ttm_object_device *tdev = base->tfile->tdev;
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
spin_unlock(&tdev->object_lock);
+
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
@@ -214,7 +223,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
int ret;
rcu_read_lock();
- ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+ ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
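
[Editor's note] With the hash table RCU-safe, lookup no longer takes the
object lock. A hedged sketch of the full lockless pattern, assuming
liveness is validated with kref_get_unless_zero() as elsewhere in this
series:

rcu_read_lock();
ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
if (likely(ret == 0)) {
	base = drm_hash_entry(hash, struct ttm_base_object, hash);
	if (!kref_get_unless_zero(&base->refcount))
		base = NULL;	/* object is being torn down */
}
rcu_read_unlock();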
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 860dc4813e99..bd2a3b40cd12 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, &plist, lru) {
- clear_page(page_address(p));
+ if (PageHighMem(p))
+ clear_highpage(p);
+ else
+ clear_page(page_address(p));
}
}
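
[Editor's note] The PageHighMem() split matters on 32-bit kernels: a
highmem page has no permanent kernel mapping, so page_address() can
return NULL and the unconditional clear_page() could oops.
clear_highpage() maps the page temporarily; its stock implementation in
include/linux/highmem.h is essentially:

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}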
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf8260133ea9..7d759a430294 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
if (unlikely(to_page == NULL))
goto out_err;
- preempt_disable();
copy_highpage(to_page, from_page);
- preempt_enable();
page_cache_release(from_page);
}
@@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
ret = PTR_ERR(to_page);
goto out_err;
}
- preempt_disable();
copy_highpage(to_page, from_page);
- preempt_enable();
set_page_dirty(to_page);
mark_page_accessed(to_page);
page_cache_release(to_page);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 25b93de19c66..512f44add89f 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -140,7 +140,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_sysfs_connector_add(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_surface.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..8369c3ba10fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ * 1. Blue and bump U are stored in the first channel.
+ * 2. Green, bump V and stencil are stored in the second channel.
+ * 3. Red, bump W, luminance and depth are stored in the third channel.
+ * 4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ * 1. For compressed formats, only the data channel is used and its size
+ * is equal to that of a singular block in the compression scheme.
+ * 2. For buffer formats, only the data channel is used and its size is
+ * exactly one byte.
+ * 3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+ SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
+ data */
+ SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
+ data */
+ SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
+ U and V */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
+ data */
+ SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
+ data */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
+ channel */
+ SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
+ data */
+ SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
+ data */
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
+ data */
+ SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
+ data */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
+ channel */
+ SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
+ data */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
+ data */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
+ data depending on the
+ compression method used */
+ SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
+ floating point
+ representation in
+ all channels */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
+ data. */
+ SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
+ SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
+ SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
+ SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
+ e.g., NV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
+ Y, U, V, e.g., YV12. */
+
+ SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q,
+ SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
+ SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_Y |
+ SVGA3DBLOCKDESC_U_VIDEO |
+ SVGA3DBLOCKDESC_V_VIDEO,
+ SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_EXP,
+ SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ * 1. Block description.
+ * 2. Dimensions of a block in the surface.
+ * 3. Size of block in bytes.
+ * 4. Bit depth of the pixel data.
+ * 5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type) \
+ struct { \
+ union { \
+ type blue; \
+ type u; \
+ type uv_video; \
+ type u_video; \
+ }; \
+ union { \
+ type green; \
+ type v; \
+ type stencil; \
+ type v_video; \
+ }; \
+ union { \
+ type red; \
+ type w; \
+ type luminance; \
+ type y; \
+ type depth; \
+ type data; \
+ }; \
+ union { \
+ type alpha; \
+ type q; \
+ type exp; \
+ }; \
+ }
+
+struct svga3d_surface_desc {
+ enum svga3d_block_desc block_desc;
+ surf_size_struct block_size;
+ u32 bytes_per_block;
+ u32 pitch_bytes_per_block;
+
+ struct {
+ u32 total;
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_depth;
+
+ struct {
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_offset;
+};
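
[Editor's note] A hedged sketch of how a descriptor is consumed (the
helper is illustrative; vmwgfx's real surface-size helpers follow later
in this header): the pitch of a mip level is the number of horizontal
blocks times pitch_bytes_per_block.

/* Illustrative only: pitch in bytes for a level 'width' pixels wide. */
static inline u32 svga3d_surface_pitch(const struct svga3d_surface_desc *desc,
				       u32 width)
{
	u32 blocks_x = DIV_ROUND_UP(width, desc->block_size.width);

	return blocks_x * desc->pitch_bytes_per_block;
}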
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+ {SVGA3DBLOCKDESC_NONE,
+ {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
+ {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
+ {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
+ {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
+ {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
+ {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
+ {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
+ {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
+
+ {SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
+
+ {SVGA3DBLOCKDESC_BUFFER,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
+ {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
+
+ {SVGA3DBLOCKDESC_NV12,
+ {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
+
+ {SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
+
+ {SVGA3DBLOCKDESC_UVW,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
+ {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBE,
+ {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
+ {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_SRGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a * b;
+ return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
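+
+/*
+ * Worked example (illustrative): with a = 0x10000 and b = 0x10000 the
+ * 64-bit product is 0x100000000, which no longer fits in 32 bits, so the
+ * helper returns the clamped value 0xffffffff instead of wrapping to 0.
+ */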
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+ if (format < ARRAY_SIZE(svga3d_surface_descs))
+ return &svga3d_surface_descs[format];
+
+ return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ * Given a base level size and the mip level, compute the size of
+ * the mip level.
+ *
+ * Results:
+ * See above.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+ surf_size_struct size;
+
+ size.width = max_t(u32, base_level.width >> mip_level, 1);
+ size.height = max_t(u32, base_level.height >> mip_level, 1);
+ size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ return size;
+}
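+
+/*
+ * Worked example (illustrative): for a 256x128x8 base level, mip level 2
+ * shifts each dimension right by two, giving 64x32x2; any dimension that
+ * would shift down to zero is clamped to 1 by the max_t() calls above.
+ */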
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *pixel_size,
+ surf_size_struct *block_size)
+{
+ block_size->width = DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+ return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size)
+{
+ u32 pitch;
+ surf_size_struct blocks;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+ pitch = blocks.width * desc->pitch_bytes_per_block;
+
+ return pitch;
+}
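+
+/*
+ * Worked example (illustrative): a 256-pixel-wide BC1 surface uses 4x4
+ * blocks at 8 pitch bytes per block, so blocks.width = 256 / 4 = 64 and
+ * the pitch is 64 * 8 = 512 bytes.
+ */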
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ * Return the number of bytes of buffer space required to store
+ * one image of a surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe. If the result would have
+ * overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ * Byte count.
+ *
+ * Side effects:
+ * None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size,
+ u32 pitch)
+{
+ surf_size_struct image_blocks;
+ u32 slice_size, total_size;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+ if (svga3dsurface_is_planar_surface(desc)) {
+ total_size = clamped_umul32(image_blocks.width,
+ image_blocks.height);
+ total_size = clamped_umul32(total_size, image_blocks.depth);
+ total_size = clamped_umul32(total_size, desc->bytes_per_block);
+ return total_size;
+ }
+
+ if (pitch == 0)
+ pitch = svga3dsurface_calculate_pitch(desc, size);
+
+ slice_size = clamped_umul32(image_blocks.height, pitch);
+ total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+ return total_size;
+}
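+
+/*
+ * Worked example (illustrative): a 64x64x1 SVGA3D_R8G8B8A8_UNORM image
+ * with pitch == 0 gets a computed pitch of 64 * 4 = 256 bytes, a slice
+ * size of 64 * 256 = 16384 bytes and, with a single depth slice, a total
+ * of 16384 bytes; every multiplication saturates rather than wrapping.
+ */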
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ bool cubemap)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ u32 total_size = 0;
+ u32 mip;
+
+ for (mip = 0; mip < num_mip_levels; mip++) {
+ surf_size_struct size =
+ svga3dsurface_get_mip_size(base_level_size, mip);
+ total_size += svga3dsurface_get_image_buffer_size(desc,
+ &size, 0);
+ }
+
+ if (cubemap)
+ total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+ return total_size;
+}
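+
+/*
+ * Worked example (illustrative): a 4x4 SVGA3D_R8_UNORM surface with two
+ * mip levels serializes 4*4 + 2*2 = 20 bytes per face; as a cubemap that
+ * is multiplied by SVGA3D_MAX_SURFACE_FACES (6) for 120 bytes in total.
+ */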
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: Pixel x coordinate.
+ * @y: Pixel y coordinate.
+ * @z: Pixel z coordinate (for volumes).
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+ u32 width, u32 height,
+ u32 x, u32 y, u32 z)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+ const u32 bd = desc->block_size.depth;
+ const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+ const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 offset = (z / bd * imgstride +
+ y / bh * rowstride +
+ x / bw * desc->bytes_per_block);
+ return offset;
+}
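+
+/*
+ * Worked example (illustrative): for SVGA3D_R8G8B8A8_UNORM (1x1x1 blocks,
+ * 4 bytes each) in a 64x64 image, pixel (x = 3, y = 2, z = 0) lies at
+ * 2 * (64 * 4) + 3 * 4 = 524 bytes from the start of the image.
+ */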
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+ surf_size_struct baseLevelSize,
+ u32 numMipLevels,
+ u32 face,
+ u32 mip)
+
+{
+ u32 offset;
+ u32 mipChainBytes;
+ u32 mipChainBytesToLevel;
+ u32 i;
+ const struct svga3d_surface_desc *desc;
+ surf_size_struct mipSize;
+ u32 bytes;
+
+ desc = svga3dsurface_get_desc(format);
+
+ mipChainBytes = 0;
+ mipChainBytesToLevel = 0;
+ for (i = 0; i < numMipLevels; i++) {
+ mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+ bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+ mipChainBytes += bytes;
+ if (i < mip)
+ mipChainBytesToLevel += bytes;
+ }
+
+ offset = mipChainBytes * face + mipChainBytesToLevel;
+
+ return offset;
+}
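+
+/*
+ * Worked example (illustrative): for the two-mip 4x4 SVGA3D_R8_UNORM
+ * surface above (20 bytes per mip chain), face 1 mip 1 starts at
+ * 20 * 1 + 16 = 36 bytes into the serialized surface.
+ */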
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index ef1109c8fec8..96dc84dc34d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
*placement = vmw_sys_placement;
}
-/**
- * FIXME: Proper access checks on buffers.
- */
-
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct ttm_object_file *tfile =
+ vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+ return vmw_user_dmabuf_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+ .object_type = VMW_RES_CONTEXT,
+ .base_obj_to_res = vmw_user_context_base_to_res,
+ .res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+ &user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "legacy contexts",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd;
+
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, false,
+ res_free, &vmw_legacy_context_func);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a resource id.\n");
+ goto out_early;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+ DRM_ERROR("Out of hw context ids.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+
+out_early:
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ctx, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of contexts anyway.
+ */
+
+ if (unlikely(vmw_user_context_size == 0))
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_context_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for context"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(ctx == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = ctx->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index bd78257cba8b..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
ttm_bo_unreserve(bo);
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_gmr_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto err_unreserve;
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
err_unreserve:
ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
	/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
@@ -306,7 +304,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
BUG_ON(!ttm_bo_is_reserved(bo));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_FLAG_GMR);
+ old_mem_type != VMW_PL_GMR);
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
if (pin)
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
placement.num_placement = 1;
placement.placement = &pl_flags;
- ret = ttm_bo_validate(bo, &placement, false, true, true);
+ ret = ttm_bo_validate(bo, &placement, false, true);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 56973cd41735..161f8b2549aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
struct vmw_private *dev_priv;
int ret;
uint32_t svga_id;
+ enum vmw_res_type i;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
- idr_init(&dev_priv->context_idr);
- idr_init(&dev_priv->surface_idr);
- idr_init(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i) {
+ idr_init(&dev_priv->res_idr[i]);
+ INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+ }
+
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->surface_lru);
+
dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -670,9 +674,9 @@ out_err2:
out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
+
kfree(dev_priv);
return ret;
}
@@ -680,9 +684,12 @@ out_err0:
static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
+ if (dev_priv->ctx.res_ht_initialized)
+ drm_ht_remove(&dev_priv->ctx.res_ht);
if (dev_priv->ctx.cmd_bounce)
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
@@ -709,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
kfree(dev_priv);
@@ -935,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -987,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
@@ -1084,6 +1092,11 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+ (void) vmw_read(dev_priv, SVGA_REG_ID);
+ mutex_unlock(&dev_priv->hw_mutex);
+
/**
* Reclaim 3d reference held by fbdev and potentially
* start fifo.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7c6f6e3a3c81..13aeda71280e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
- struct list_head validate_list;
- bool gmr_bound;
- uint32_t cur_validate_node;
- bool on_validate_list;
+ struct list_head res_list;
};
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also contains driver-private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+ struct ttm_validate_buffer base;
+ struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
- struct idr *idr;
int id;
- enum ttm_object_type res_type;
bool avail;
- void (*remove_from_lists) (struct vmw_resource *res);
- void (*hw_destroy) (struct vmw_resource *res);
+ unsigned long backup_size;
+ bool res_dirty; /* Protected by backup buffer reserved */
+ bool backup_dirty; /* Protected by backup buffer reserved */
+ struct vmw_dma_buffer *backup;
+ unsigned long backup_offset;
+ const struct vmw_res_func *func;
+ struct list_head lru_head; /* Protected by the resource lock */
+ struct list_head mob_head; /* Protected by @backup reserved */
void (*res_free) (struct vmw_resource *res);
- struct list_head validate_head;
- struct list_head query_head; /* Protected by the cmdbuf mutex */
- /* TODO is a generic snooper needed? */
-#if 0
- void (*snoop)(struct vmw_resource *res,
- struct ttm_object_file *tfile,
- SVGA3dCmdHeader *header);
- void *snoop_priv;
-#endif
+ void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+ vmw_res_context,
+ vmw_res_surface,
+ vmw_res_stream,
+ vmw_res_max
};
struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
uint32_t num_sizes;
-
bool scanout;
-
/* TODO so far just a extra pointer */
struct vmw_cursor_snooper snooper;
- struct ttm_buffer_object *backup;
struct vmw_surface_offset *offsets;
- uint32_t backup_size;
+ SVGA3dTextureFilter autogen_filter;
+ uint32_t multisample_count;
};
struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
uint32_t index;
};
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+ bool valid;
+ uint32_t handle;
+ struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+};
+
struct vmw_sw_context{
- struct ida bo_list;
- uint32_t last_cid;
- bool cid_valid;
+ struct drm_open_hash res_ht;
+ bool res_ht_initialized;
bool kernel; /**< is the called made from the kernel */
- struct vmw_resource *cur_ctx;
- uint32_t last_sid;
- uint32_t sid_translation;
- bool sid_valid;
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+ struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
uint32_t fence_flags;
- struct list_head query_list;
struct ttm_buffer_object *cur_query_bo;
- uint32_t cur_query_cid;
- bool query_cid_valid;
+ struct list_head res_relocations;
+ uint32_t *buf_start;
+ struct vmw_res_cache_entry res_cache[vmw_res_max];
+ struct vmw_resource *last_query_ctx;
+ bool needs_post_query_barrier;
+ struct vmw_resource *error_resource;
};
struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
*/
rwlock_t resource_lock;
- struct idr context_idr;
- struct idr surface_idr;
- struct idr stream_idr;
-
+ struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
*/
@@ -320,6 +347,7 @@ struct vmw_private {
struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
uint32_t query_cid;
+ uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
/*
@@ -329,10 +357,15 @@ struct vmw_private {
* protected by the cmdbuf mutex for simplicity.
*/
- struct list_head surface_lru;
+ struct list_head res_lru[vmw_res_max];
uint32_t used_memory_size;
};
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_surface, res);
+}
+
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
* Resource utilities - vmwgfx_resource.c
*/
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+ struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv *converter,
+ struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e5775a0db495..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset of 4 byte entries into the command buffer where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+ struct list_head head;
+ const struct vmw_resource *res;
+ unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: Resources do not need to allocate buffer backup on
+ * reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *new_backup;
+ unsigned long new_backup_offset;
+ bool first_usage;
+ bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: Pointer to the list of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+ bool backoff)
+{
+ struct vmw_resource_val_node *val;
+
+ list_for_each_entry(val, list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *new_backup =
+ backoff ? NULL : val->new_backup;
+
+ vmw_resource_unreserve(res, new_backup,
+ val->new_backup_offset);
+ vmw_dmabuf_unreference(&val->new_backup);
+ }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return, points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_node)
+{
+ struct vmw_resource_val_node *node;
+ struct drm_hash_item *hash;
+ int ret;
+
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+ &hash) == 0)) {
+ node = container_of(hash, struct vmw_resource_val_node, hash);
+ node->first_usage = false;
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+ return 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(node == NULL)) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ kfree(node);
+ return ret;
+ }
+ list_add_tail(&node->head, &sw_context->resource_list);
+ node->res = vmw_resource_reference(res);
+ node->first_usage = true;
+
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+
+ return 0;
+}
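+
+/*
+ * Illustrative note: since the lookup above is keyed on the resource
+ * pointer, a resource referenced by many commands in one batch gets a
+ * single val_node; only the first call takes the allocation path and
+ * leaves first_usage set.
+ */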
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+ const struct vmw_resource *res,
+ unsigned long offset)
+{
+ struct vmw_resource_relocation *rel;
+
+ rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ if (unlikely(rel == NULL)) {
+ DRM_ERROR("Failed to allocate a resource relocation.\n");
+ return -ENOMEM;
+ }
+
+ rel->res = res;
+ rel->offset = offset;
+ list_add_tail(&rel->head, list);
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+ struct vmw_resource_relocation *rel, *n;
+
+ list_for_each_entry_safe(rel, n, list, head) {
+ list_del(&rel->head);
+ kfree(rel);
+ }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+ struct list_head *list)
+{
+ struct vmw_resource_relocation *rel;
+
+ list_for_each_entry(rel, list, head)
+ cb[rel->offset] = rel->res->id;
+}
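+
+/*
+ * Illustrative note: rel->offset counts 32-bit words, so a relocation
+ * recorded at word offset 1 rewrites cb[1] with the resource's final
+ * hardware id once all ids have been assigned.
+ */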
+
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res = *p_res;
-
- if (list_empty(&res->validate_head)) {
- list_add_tail(&res->validate_head, &sw_context->resource_list);
- *p_res = NULL;
- } else
- vmw_resource_unreference(p_res);
-}
-
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
 * @p_val_node: If non-NULL, will be updated with the validate node number
* on return.
*
@@ -74,21 +235,37 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
uint32_t *p_val_node)
{
uint32_t val_node;
+ struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
+ struct drm_hash_item *hash;
+ int ret;
- val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- return -EINVAL;
- }
-
- val_buf = &sw_context->val_bufs[val_node];
- if (unlikely(val_node == sw_context->cur_val_buf)) {
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ &hash) == 0)) {
+ vval_buf = container_of(hash, struct vmw_validate_buffer,
+ hash);
+ val_buf = &vval_buf->base;
+ val_node = vval_buf - sw_context->val_bufs;
+ } else {
+ val_node = sw_context->cur_val_buf;
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission "
+ "exceeded.\n");
+ return -EINVAL;
+ }
+ vval_buf = &sw_context->val_bufs[val_node];
+ vval_buf->hash.key = (unsigned long) bo;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a buffer validation "
+ "entry.\n");
+ return ret;
+ }
+ ++sw_context->cur_val_buf;
+ val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -99,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return 0;
}
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission is currently protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at once will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource *ctx;
-
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- __le32 cid;
- } *cmd;
+ struct vmw_resource_val_node *val;
int ret;
- cmd = container_of(header, struct vmw_cid_cmd, header);
- if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
- return 0;
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
- &ctx);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use context %u\n",
- (unsigned) cmd->cid);
- return ret;
+ ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, bo, NULL);
+
+ if (unlikely(ret != 0))
+ return ret;
+ }
}
+ return 0;
+}
- sw_context->last_cid = cmd->cid;
- sw_context->cid_valid = true;
- sw_context->cur_ctx = ctx;
- vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ ret = vmw_resource_validate(res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+ }
return 0;
}
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- uint32_t *sid)
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id,
+ struct vmw_resource_val_node **p_val)
{
- struct vmw_surface *srf;
- int ret;
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[res_type];
struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+ int ret;
- if (*sid == SVGA3D_INVALID_ID)
+ if (*id == SVGA3D_INVALID_ID)
return 0;
- if (likely((sw_context->sid_valid &&
- *sid == sw_context->last_sid))) {
- *sid = sw_context->sid_translation;
- return 0;
- }
+ /*
+ * Fastpath in case of repeated commands referencing the same
+ * resource
+ */
- ret = vmw_user_surface_lookup_handle(dev_priv,
- sw_context->tfile,
- *sid, &srf);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
+ if (likely(rcache->valid && *id == rcache->handle)) {
+ const struct vmw_resource *res = rcache->res;
+
+ rcache->node->first_usage = false;
+ if (p_val)
+ *p_val = rcache->node;
+
+ return vmw_resource_relocation_add
+ (&sw_context->res_relocations, res,
+ id - sw_context->buf_start);
}
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_user_resource_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *id,
+ converter,
+ &res);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Could not validate surface.\n");
- vmw_surface_unreference(&srf);
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned) *id);
+ dump_stack();
return ret;
}
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- sw_context->sid_translation = srf->res.id;
- *sid = sw_context->sid_translation;
+ rcache->valid = true;
+ rcache->res = res;
+ rcache->handle = *id;
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ rcache->node = node;
+ if (p_val)
+ *p_val = node;
+ vmw_resource_unreference(&res);
return 0;
+
+out_no_reloc:
+ BUG_ON(sw_context->error_resource != NULL);
+ sw_context->error_resource = res;
+
+ return ret;
}
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->cid, NULL);
+}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -194,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.target.sid, NULL);
return ret;
}
@@ -209,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -226,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -248,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -268,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->body.sid,
+ NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
@@ -283,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- uint32_t cid,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
int ret;
- bool add_cid = false;
- uint32_t cid_to_add;
+
+ BUG_ON(!ctx_entry->valid);
+ sw_context->last_query_ctx = ctx_entry->res;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
@@ -304,9 +583,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
- BUG_ON(!sw_context->query_cid_valid);
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
+ sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
NULL);
@@ -323,27 +600,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
- if (unlikely(cid != sw_context->cur_query_cid &&
- sw_context->query_cid_valid)) {
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- }
-
- sw_context->cur_query_cid = cid;
- sw_context->query_cid_valid = true;
-
- if (add_cid) {
- struct vmw_resource *ctx = sw_context->cur_ctx;
-
- if (list_empty(&ctx->query_head))
- list_add_tail(&ctx->query_head,
- &sw_context->query_list);
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
return 0;
}
@@ -355,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
@@ -369,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
-
- struct vmw_resource *ctx, *next_ctx;
- int ret;
-
/*
* The validate list should still hold references to all
* contexts here.
*/
- list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
- query_head) {
- list_del_init(&ctx->query_head);
+ if (sw_context->needs_post_query_barrier) {
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
+ struct vmw_resource *ctx;
+ int ret;
- BUG_ON(list_empty(&ctx->validate_head));
+ BUG_ON(!ctx_entry->valid);
+ ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
@@ -396,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ttm_bo_unref(&dev_priv->pinned_bo);
}
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ if (!sw_context->needs_post_query_barrier) {
+ vmw_bo_pin(sw_context->cur_query_bo, true);
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
+ /*
+		 * We also pin the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
- dev_priv->query_cid = sw_context->cur_query_cid;
- dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ BUG_ON(sw_context->last_query_ctx == NULL);
+ dev_priv->query_cid = sw_context->last_query_ctx->id;
+ dev_priv->query_cid_valid = true;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
}
}
/**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
*
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
*
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
*/
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &sw_context->query_list) {
- list_del_init(list);
- }
-}
-
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -471,6 +731,37 @@ out_no_reloc:
return ret;
}
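The deferred translation described in vmw_translate_guest_ptr()'s comment above works through relocation records; a minimal sketch of the bookkeeping, with field names mirroring how vmw_apply_relocations() uses them further down (the struct name itself is invented):

/*
 * Illustrative sketch: a relocation remembers where in the command
 * stream an SVGAGuestPtr lives and which sw_context->val_bufs[] entry
 * backs it.  vmw_apply_relocations() later patches location->gmrId and
 * location->offset once the buffer has its final placement.
 */
struct vmw_relocation_sketch {
	SVGAGuestPtr *location;	/* points into the command stream */
	uint32_t index;		/* index into the validate buffer array */
};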
+/**
+ * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -493,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
- &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
+/**
+ * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -510,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
- struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -524,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
-
- /*
- * This wait will act as a barrier for previous waits for this
- * context.
- */
-
- ctx = sw_context->cur_ctx;
- if (!list_empty(&ctx->query_head))
- list_del_init(&ctx->query_head);
-
return 0;
}
@@ -542,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
- struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -558,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- bo = &vmw_bo->base;
- ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
- cmd->dma.host.sid, &srf);
- if (ret) {
- DRM_ERROR("could not find surface\n");
- goto out_no_reloc;
- }
-
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->dma.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Culd not validate surface.\n");
- goto out_no_validate;
+ if (unlikely(ret != -ERESTARTSYS))
+ DRM_ERROR("could not find surface for DMA.\n");
+ goto out_no_surface;
}
- /*
- * Patch command stream with device SID.
- */
- cmd->dma.host.sid = srf->res.id;
- vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- return 0;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
-out_no_validate:
- vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
@@ -621,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &decl->array.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -636,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &range->indexArray.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -668,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &cur_state->value);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -700,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_cmd,
+ header);
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return 0;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -773,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check)
+ &vmw_cmd_blt_surf_screen_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
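For orientation, a hedged sketch of how a table like vmw_cmd_funcs[] is consumed; the real vmw_cmd_check() additionally verifies that the decoded command size fits within the remaining submission buffer before dispatching:

/*
 * Illustrative dispatch sketch, not the exact upstream implementation.
 * VMW_CMD_DEF() indexes the table by (cmd - SVGA_3D_CMD_BASE), so the
 * id is rebased before the bounds check.
 */
static int vmw_cmd_dispatch_sketch(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   void *buf, uint32_t *size)
{
	SVGA3dCmdHeader *header = buf;
	uint32_t cmd_id = le32_to_cpu(header->id);

	*size = le32_to_cpu(header->size) + sizeof(*header);
	cmd_id -= SVGA_3D_CMD_BASE;
	if (cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
		return -EINVAL;

	return vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
}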
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -829,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
int32_t cur_size = size;
int ret;
+ sw_context->buf_start = buf;
+
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -860,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index];
+ validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
- } else
+ break;
+ case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
+ break;
+ default:
+ BUG();
+ }
}
vmw_free_relocations(sw_context);
}
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+ struct vmw_resource_val_node *val, *val_next;
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+
+ list_for_each_entry_safe(val, val_next, list, head) {
+ list_del_init(&val->head);
+ vmw_resource_unreference(&val->res);
+ kfree(val);
+ }
+}
+
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry, *next;
- struct vmw_resource *res, *res_next;
+ struct vmw_validate_buffer *entry, *next;
+ struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- head) {
- list_del(&entry->head);
- vmw_dmabuf_validate_clear(entry->bo);
- ttm_bo_unref(&entry->bo);
+ base.head) {
+ list_del(&entry->base.head);
+ ttm_bo_unref(&entry->base.bo);
+ (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
- /*
- * Drop references to resources held during command submission.
- */
- vmw_resource_unreserve(&sw_context->resource_list);
- list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
- validate_head) {
- list_del_init(&res->validate_head);
- vmw_resource_unreference(&res);
- }
+ list_for_each_entry(val, &sw_context->resource_list, head)
+ (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -921,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -931,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
*/
DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
@@ -939,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry;
+ struct vmw_validate_buffer *entry;
int ret;
- list_for_each_entry(entry, &sw_context->validate_nodes, head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1106,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
+ struct vmw_resource *error_resource;
+ struct list_head resource_list;
uint32_t handle;
void *cmd;
int ret;
@@ -1135,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
- sw_context->cid_valid = false;
- sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
- INIT_LIST_HEAD(&sw_context->query_list);
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->cur_query_cid = dev_priv->query_cid;
- sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+ sw_context->last_query_ctx = NULL;
+ sw_context->needs_post_query_barrier = false;
+ memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
+ INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (!sw_context->res_ht_initialized) {
+ ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ sw_context->res_ht_initialized = true;
+ }
+ INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
+ ret = vmw_resources_reserve(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -1161,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- vmw_apply_relocations(sw_context);
+ ret = vmw_resources_validate(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
- goto out_throttle;
+ goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_throttle;
+ goto out_err;
}
+ vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
+
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+
vmw_fifo_commit(dev_priv, command_size);
vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1194,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
+ vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
(void *) fence);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
@@ -1209,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
+ list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+
return 0;
out_err:
+ vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
-out_throttle:
- vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
+ list_splice_init(&sw_context->resource_list, &resource_list);
+ error_resource = sw_context->error_resource;
+ sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+ if (unlikely(error_resource != NULL))
+ vmw_resource_unreference(&error_resource);
+
return ret;
}
@@ -1244,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
@@ -1263,23 +1633,21 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
*/
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *fence;
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
+ struct vmw_fence_obj *lfence = NULL;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- if (only_on_cid_match && cid != dev_priv->query_cid)
- goto out_unlock;
-
INIT_LIST_HEAD(&validate_list);
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
@@ -1297,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
goto out_no_reserve;
}
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
+ if (dev_priv->query_cid_valid) {
+ BUG_ON(fence != NULL);
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+ dev_priv->query_cid_valid = false;
}
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (fence == NULL) {
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+ NULL);
+ fence = lfence;
+ }
ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+ if (lfence != NULL)
+ vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
out_unlock:
- mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
out_no_emit:
@@ -1324,6 +1701,31 @@ out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ if (dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
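A hedged caller sketch: a hardware-context destroy path can use the wrapper above as a query barrier before emitting the destroy command, so the device cannot keep writing query results into memory that is about to be recycled (the function below is hypothetical):

static void vmw_context_destroy_sketch(struct vmw_private *dev_priv,
				       struct vmw_resource *res)
{
	/* Query barrier: flush and unpin any pinned query buffer first. */
	vmw_execbuf_release_pinned_bo(dev_priv);

	/* ... then emit SVGA_3D_CMD_CONTEXT_DESTROY for res->id ... */
}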
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58c..c62d20e8a6f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fence->fman;
- kfree(ufence);
+ ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b07ca2e4d04b..d9fbbe191071 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
ret = copy_to_user(buffer, bounce, size);
+ if (ret)
+ ret = -EFAULT;
vfree(bounce);
if (unlikely(ret != 0))
@@ -131,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *clips = NULL;
struct drm_mode_object *obj;
struct vmw_framebuffer *vfb;
+ struct vmw_resource *res;
uint32_t num_clips;
int ret;
@@ -178,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_no_ttm_lock;
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
- &surface);
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+ user_surface_converter,
+ &res);
if (ret)
goto out_no_surface;
+ surface = vmw_res_to_srf(res);
ret = vmw_kms_present(dev_priv, file_priv,
vfb, surface, arg->sid,
arg->dest_x, arg->dest_y,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5af..79f7e8e60529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 292c988c54ea..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
-
-struct vmw_user_context {
- struct ttm_base_object base;
- struct vmw_resource res;
-};
-
-struct vmw_user_surface {
- struct ttm_base_object base;
- struct vmw_surface srf;
- uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
struct vmw_user_dma_buffer {
struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
-struct vmw_surface_offset {
- uint32_t face;
- uint32_t mip;
- uint32_t bo_offset;
-};
-
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
+static const struct vmw_res_func vmw_stream_func = {
+ .res_type = vmw_res_stream,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "video streams",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
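For contrast with vmw_stream_func above, a hypothetical sketch of the table a backed resource type would supply; every vmw_example_* identifier is invented for illustration:

/*
 * Hypothetical sketch: a resource type with guest-backed storage fills
 * in the hooks that streams leave NULL, so the generic code can create,
 * bind, unbind and destroy its hardware state on demand.
 */
static const struct vmw_res_func vmw_example_func = {
	.res_type = vmw_res_surface,		/* illustrative only */
	.needs_backup = true,
	.may_evict = true,
	.type_name = "example resources",
	.backup_placement = &vmw_srf_placement,
	.create = vmw_example_create,		/* hypothetical hooks */
	.destroy = vmw_example_destroy,
	.bind = vmw_example_bind,
	.unbind = vmw_example_unbind,
};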
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
*
* Release the resource id to the resource id manager and set it to -1
*/
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
write_lock(&dev_priv->resource_lock);
if (res->id != -1)
- idr_remove(res->idr, res->id);
+ idr_remove(idr, res->id);
res->id = -1;
write_unlock(&dev_priv->resource_lock);
}
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
- int id = res->id;
- struct idr *idr = res->idr;
+ int id;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
res->avail = false;
- if (res->remove_from_lists != NULL)
- res->remove_from_lists(res);
+ list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ if (!list_empty(&res->mob_head) &&
+ res->func->unbind != NULL) {
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+ res->func->unbind(res, false, &val_buf);
+ }
+ res->backup_dirty = false;
+ list_del_init(&res->mob_head);
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&res->backup);
+ }
if (likely(res->hw_destroy != NULL))
res->hw_destroy(res);
+ id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
/**
* vmw_resource_alloc_id - allocate a resource id from the id manager.
*
- * @dev_priv: Pointer to the device private structure.
* @res: Pointer to the resource.
*
* Allocate the lowest free resource id from the id manager, and set
* @res->id to that id. Returns 0 on success and -ENOMEM on failure.
*/
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
- struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
{
+ struct vmw_private *dev_priv = res->dev_priv;
int ret;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
BUG_ON(res->id != -1);
do {
- if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(res->idr, res, 1, &res->id);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
return ret;
}
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- bool delay_id,
- void (*res_free) (struct vmw_resource *res),
- void (*remove_from_lists)
- (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @res: The struct vmw_resource to initialize.
+ * @delay_id: Boolean whether to defer device id allocation until
+ * the first validation.
+ * @res_free: Resource destructor.
+ * @func: Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func)
{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->remove_from_lists = remove_from_lists;
- res->res_type = obj_type;
- res->idr = idr;
res->avail = false;
res->dev_priv = dev_priv;
- INIT_LIST_HEAD(&res->query_head);
- INIT_LIST_HEAD(&res->validate_head);
+ res->func = func;
+ INIT_LIST_HEAD(&res->lru_head);
+ INIT_LIST_HEAD(&res->mob_head);
res->id = -1;
+ res->backup = NULL;
+ res->backup_offset = 0;
+ res->backup_dirty = false;
+ res->res_dirty = false;
if (delay_id)
return 0;
else
- return vmw_resource_alloc_id(dev_priv, res);
+ return vmw_resource_alloc_id(res);
}
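A hedged usage sketch tying vmw_resource_init() to the vmw_stream_func table defined earlier in this file; the wrapper name is invented and error handling is left to the caller:

static int vmw_stream_init_sketch(struct vmw_private *dev_priv,
				  struct vmw_resource *res,
				  void (*res_free) (struct vmw_resource *res))
{
	/*
	 * Streams carry no backup buffer, so the device id can be
	 * allocated immediately (delay_id == false).
	 */
	return vmw_resource_init(dev_priv, res, false, res_free,
				 &vmw_stream_func);
}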
/**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
-
-static void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -250,990 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
}
/**
- * Context management:
- */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyContext body;
- } *cmd;
-
-
- vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineContext body;
- } *cmd;
-
- ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, false, res_free, NULL);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a resource id.\n");
- goto out_early;
- }
-
- if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
- DRM_ERROR("Out of hw context ids.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_context_destroy);
- return 0;
-
-out_early:
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
- return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
-
-static void vmw_user_context_free(struct vmw_resource *res)
-{
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
-
- ttm_base_object_kfree(ctx, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
-}
-
-/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
- */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_context *ctx =
- container_of(base, struct vmw_user_context, base);
- struct vmw_resource *res = &ctx->res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_resource *res;
- struct vmw_user_context *ctx;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret = 0;
-
- res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
- if (unlikely(res == NULL))
- return -EINVAL;
-
- if (res->res_free != &vmw_user_context_free) {
- ret = -EINVAL;
- goto out;
- }
-
- ctx = container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable) {
- ret = -EPERM;
- goto out;
- }
-
- ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
- vmw_resource_unreference(&res);
- return ret;
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
-
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_unlock;
- }
-
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- res = &ctx->res;
- ctx->base.shareable = false;
- ctx->base.tfile = NULL;
-
- /*
- * From here on, the destructor takes over resource freeing.
- */
-
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&ctx->res);
- ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
- }
-
- arg->cid = res->id;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-
-}
-
-int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res;
- int ret = 0;
-
- read_lock(&dev_priv->resource_lock);
- res = idr_find(&dev_priv->context_idr, id);
- if (res && res->avail) {
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable)
- ret = -EPERM;
- if (p_res)
- *p_res = vmw_resource_reference(res);
- } else
- ret = -EINVAL;
- read_unlock(&dev_priv->resource_lock);
-
- return ret;
-}
-
-struct vmw_bpp {
- uint8_t bpp;
- uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
- [SVGA3D_FORMAT_INVALID] = {0, 0},
- [SVGA3D_X8R8G8B8] = {32, 32},
- [SVGA3D_A8R8G8B8] = {32, 32},
- [SVGA3D_R5G6B5] = {16, 16},
- [SVGA3D_X1R5G5B5] = {16, 16},
- [SVGA3D_A1R5G5B5] = {16, 16},
- [SVGA3D_A4R4G4B4] = {16, 16},
- [SVGA3D_Z_D32] = {32, 32},
- [SVGA3D_Z_D16] = {16, 16},
- [SVGA3D_Z_D24S8] = {32, 32},
- [SVGA3D_Z_D15S1] = {16, 16},
- [SVGA3D_LUMINANCE8] = {8, 8},
- [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
- [SVGA3D_LUMINANCE16] = {16, 16},
- [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
- [SVGA3D_DXT1] = {4, 16},
- [SVGA3D_DXT2] = {8, 32},
- [SVGA3D_DXT3] = {8, 32},
- [SVGA3D_DXT4] = {8, 32},
- [SVGA3D_DXT5] = {8, 32},
- [SVGA3D_BUMPU8V8] = {16, 16},
- [SVGA3D_BUMPL6V5U5] = {16, 16},
- [SVGA3D_BUMPX8L8V8U8] = {32, 32},
- [SVGA3D_ARGB_S10E5] = {16, 16},
- [SVGA3D_ARGB_S23E8] = {32, 32},
- [SVGA3D_A2R10G10B10] = {32, 32},
- [SVGA3D_V8U8] = {16, 16},
- [SVGA3D_Q8W8V8U8] = {32, 32},
- [SVGA3D_CxV8U8] = {16, 16},
- [SVGA3D_X8L8V8U8] = {32, 32},
- [SVGA3D_A2W10V10U10] = {32, 32},
- [SVGA3D_ALPHA8] = {8, 8},
- [SVGA3D_R_S10E5] = {16, 16},
- [SVGA3D_R_S23E8] = {32, 32},
- [SVGA3D_RG_S10E5] = {16, 16},
- [SVGA3D_RG_S23E8] = {32, 32},
- [SVGA3D_BUFFER] = {8, 8},
- [SVGA3D_Z_D24X8] = {32, 32},
- [SVGA3D_V16U16] = {32, 32},
- [SVGA3D_G16R16] = {32, 32},
- [SVGA3D_A16B16G16R16] = {64, 64},
- [SVGA3D_UYVY] = {12, 12},
- [SVGA3D_YUY2] = {12, 12},
- [SVGA3D_NV12] = {12, 8},
- [SVGA3D_AYUV] = {32, 32},
- [SVGA3D_BC4_UNORM] = {4, 16},
- [SVGA3D_BC5_UNORM] = {8, 32},
- [SVGA3D_Z_DF16] = {16, 16},
- [SVGA3D_Z_DF24] = {24, 24},
- [SVGA3D_Z_D24S8_INT] = {32, 32}
-};
-
-
-/**
- * Surface management.
- */
-
-struct vmw_surface_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
- SVGA3dCopyBox cb;
- SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
- sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
+ * TTM user-space handle and perform basic type checks
*
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
- return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
- void *cmd_space)
-{
- struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
- cmd_space;
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ * @p_res: On successful return, the location pointed to will contain
+ * a pointer to a refcounted struct vmw_resource.
*
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
*/
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
- void *cmd_space)
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter,
+ struct vmw_resource **p_res)
{
- struct vmw_surface_define *cmd = (struct vmw_surface_define *)
- cmd_space;
- struct drm_vmw_size *src_size;
- SVGA3dSize *cmd_size;
- uint32_t cmd_len;
- int i;
-
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
-
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = src_size->width;
- cmd_size->height = src_size->height;
- cmd_size->depth = src_size->depth;
- }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
- void *cmd_space,
- const SVGAGuestPtr *ptr,
- bool to_surface)
-{
- uint32_t i;
- uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
- uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
- struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
- for (i = 0; i < srf->num_sizes; ++i) {
- SVGA3dCmdHeader *header = &cmd->header;
- SVGA3dCmdSurfaceDMA *body = &cmd->body;
- SVGA3dCopyBox *cb = &cmd->cb;
- SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
- const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
- header->id = SVGA_3D_CMD_SURFACE_DMA;
- header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
- body->guest.ptr = *ptr;
- body->guest.ptr.offset += cur_offset->bo_offset;
- body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
- body->host.sid = srf->res.id;
- body->host.face = cur_offset->face;
- body->host.mipmap = cur_offset->mip;
- body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM);
- cb->x = 0;
- cb->y = 0;
- cb->z = 0;
- cb->srcx = 0;
- cb->srcy = 0;
- cb->srcz = 0;
- cb->w = cur_size->width;
- cb->h = cur_size->height;
- cb->d = cur_size->depth;
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = body->guest.pitch*cur_size->height*
- cur_size->depth*bpp / stride_bpp;
- suffix->flags.discard = 0;
- suffix->flags.unsynchronized = 0;
- suffix->flags.reserved = 0;
- ++cmd;
- }
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
- void *cmd;
-
- if (res->id != -1) {
-
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
- /*
- * used_memory_size_atomic, or separate lock
- * to avoid taking dev_priv::cmdbuf_mutex in
- * the destroy path.
- */
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = container_of(res, struct vmw_surface, res);
- dev_priv->used_memory_size -= srf->backup_size;
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- }
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
-
- if (likely(res->id != -1))
- return 0;
-
- if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
- dev_priv->memory_size))
- return -EBUSY;
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- if (srf->backup) {
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
- }
-
- /*
- * Alloc id for the resource.
- */
-
- ret = vmw_resource_alloc_id(dev_priv, res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a surface id.\n");
- goto out_no_id;
- }
- if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
- ret = -EBUSY;
- goto out_no_fifo;
- }
-
-
- /*
- * Encode surface define- and dma commands.
- */
-
- submit_size = vmw_surface_define_size(srf);
- if (srf->backup)
- submit_size += vmw_surface_dma_size(srf);
-
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "validation.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_surface_define_encode(srf, cmd);
- if (srf->backup) {
- SVGAGuestPtr ptr;
-
- cmd += vmw_surface_define_size(srf);
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, true);
- }
-
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Create a fence object and fence the backup buffer.
- */
-
- if (srf->backup) {
- struct vmw_fence_obj *fence;
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- }
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size += srf->backup_size;
-
- return 0;
-
-out_no_fifo:
- vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- if (srf->backup)
- ttm_bo_unref(&val_buf.bo);
- return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
- struct vmw_fence_obj *fence;
- SVGAGuestPtr ptr;
-
- BUG_ON(res->id == -1);
-
- /*
- * Create a surface backup buffer object.
- */
-
- if (!srf->backup) {
- ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
- ttm_bo_type_device,
- &vmw_srf_placement, 0, true,
- NULL, &srf->backup);
- if (unlikely(ret != 0))
- return ret;
- }
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
-
-
- /*
- * Encode the dma- and surface destroy commands.
- */
-
- submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, false);
- cmd += vmw_surface_dma_size(srf);
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size -= srf->backup_size;
-
- /*
- * Create a fence object and fence the DMA buffer.
- */
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
-
- /*
- * Release the surface ID.
- */
-
- vmw_resource_release_id(res);
-
- return 0;
-
-out_no_fifo:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- int ret;
- struct vmw_surface *evict_srf;
-
- do {
- write_lock(&dev_priv->resource_lock);
- list_del_init(&srf->lru_head);
- write_unlock(&dev_priv->resource_lock);
-
- ret = vmw_surface_do_validate(dev_priv, srf);
- if (likely(ret != -EBUSY))
- break;
-
- write_lock(&dev_priv->resource_lock);
- if (list_empty(&dev_priv->surface_lru)) {
- DRM_ERROR("Out of device memory for surfaces.\n");
- ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
- break;
- }
-
- evict_srf = vmw_surface_reference
- (list_first_entry(&dev_priv->surface_lru,
- struct vmw_surface,
- lru_head));
- list_del_init(&evict_srf->lru_head);
-
- write_unlock(&dev_priv->resource_lock);
- (void) vmw_surface_evict(dev_priv, evict_srf);
-
- vmw_surface_unreference(&evict_srf);
-
- } while (1);
-
- if (unlikely(ret != 0 && srf->res.id != -1)) {
- write_lock(&dev_priv->resource_lock);
- list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
- write_unlock(&dev_priv->resource_lock);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
- struct vmw_resource *res = &srf->res;
-
- BUG_ON(res_free == NULL);
- INIT_LIST_HEAD(&srf->lru_head);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, true, res_free,
- vmw_surface_remove_from_lists);
-
- if (unlikely(ret != 0))
- res_free(res);
-
- /*
- * The surface won't be visible to hardware until a
- * surface validate.
- */
-
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
- return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
- struct vmw_user_surface *user_srf =
- container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- ttm_base_object_kfree(user_srf, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- rwlock_t *lock = NULL;
-
- list_for_each_entry(res, list, validate_head) {
-
- if (res->res_free != &vmw_surface_res_free &&
- res->res_free != &vmw_user_surface_free)
- continue;
-
- if (unlikely(lock == NULL)) {
- lock = &res->dev_priv->resource_lock;
- write_lock(lock);
- }
-
- srf = container_of(res, struct vmw_surface, res);
- list_del_init(&srf->lru_head);
- list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
- }
-
- if (lock != NULL)
- write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
-{
- int ret;
-
- BUG_ON(*out_surf || *out_buf);
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
- if (!ret)
- return 0;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
- return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_surface **out)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
struct ttm_base_object *base;
+ struct vmw_resource *res;
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(base->object_type != converter->object_type))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
- res = &srf->res;
+ res = converter->base_obj_to_res(base);
read_lock(&dev_priv->resource_lock);
-
- if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ if (!res->avail || res->res_free != converter->res_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}
@@ -1241,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);
- *out = srf;
+ *p_res = res;
ret = 0;
out_bad_resource:
@@ -1250,286 +319,32 @@ out_bad_resource:
return ret;
}
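A hypothetical usage sketch for the lookup helper above: resolve a user-space surface handle to a refcounted resource, then drop the reference when done (vmw_example_use_surface() is invented; user_surface_converter is the converter object this series introduces):

static int vmw_example_use_surface(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter, &res);
	if (unlikely(ret != 0))
		return ret;	/* bad handle or wrong object type */

	/* ... operate on vmw_res_to_srf(res) here ... */

	vmw_resource_unreference(&res);
	return 0;
}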
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
- struct vmw_resource *res = &user_srf->srf.res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by @out_surf and @out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
struct vmw_resource *res;
- struct vmw_resource *tmp;
- union drm_vmw_surface_create_arg *arg =
- (union drm_vmw_surface_create_arg *)data;
- struct drm_vmw_surface_create_req *req = &arg->req;
- struct drm_vmw_surface_arg *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
- int i, j;
- uint32_t cur_bo_offset;
- struct drm_vmw_size *cur_size;
- struct vmw_surface_offset *cur_offset;
- uint32_t stride_bpp;
- uint32_t bpp;
- uint32_t num_sizes;
- uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- num_sizes += req->mip_levels[i];
-
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
-
- size = vmw_user_surface_size + 128 +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
- srf->backup = NULL;
-
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
- user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_sizes;
- }
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_offsets;
- }
-
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
- cur_bo_offset = 0;
- cur_offset = srf->offsets;
- cur_size = srf->sizes;
-
- bpp = vmw_sf_bpp[srf->format].bpp;
- stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
- uint32_t stride =
- (cur_size->width * stride_bpp + 7) >> 3;
-
- cur_offset->face = i;
- cur_offset->mip = j;
- cur_offset->bo_offset = cur_bo_offset;
- cur_bo_offset += stride * cur_size->height *
- cur_size->depth * bpp / stride_bpp;
- ++cur_offset;
- ++cur_size;
- }
- }
- srf->backup_size = cur_bo_offset;
-
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
-
- /* allocate image area and clear it */
- srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
- }
- srf->snooper.crtc = NULL;
-
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
-
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
-
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->sid = user_srf->base.hash.key;
- if (rep->sid == SVGA3D_INVALID_ID)
- DRM_ERROR("Created bad Surface ID.\n");
-
- vmw_resource_unreference(&res);
-
- ttm_read_unlock(&vmaster->lock);
- return 0;
-out_no_copy:
- kfree(srf->offsets);
-out_no_offsets:
- kfree(srf->sizes);
-out_no_sizes:
- kfree(user_srf);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_vmw_surface_reference_arg *arg =
- (union drm_vmw_surface_reference_arg *)data;
- struct drm_vmw_surface_arg *req = &arg->req;
- struct drm_vmw_surface_create_req *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct drm_vmw_size __user *user_sizes;
- struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup(tfile, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
-
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_resource;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
-
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a surface.\n");
- goto out_no_reference;
- }
-
- rep->flags = srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- rep->size_addr;
+ BUG_ON(*out_surf || *out_buf);
- if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
- ret = -EFAULT;
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+ user_surface_converter,
+ &res);
+ if (!ret) {
+ *out_surf = vmw_res_to_srf(res);
+ return 0;
}
-out_bad_resource:
-out_no_reference:
- ttm_base_object_unref(&base);
-
- return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id)
-{
- struct ttm_base_object *base;
- struct vmw_user_surface *user_srf;
-
- int ret = -EPERM;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
- return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_surface;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- *id = user_srf->srf.res.id;
- ret = 0;
-
-out_bad_surface:
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
-
- ttm_base_object_unref(&base);
+ *out_surf = NULL;
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}
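+
+/*
+ * Editor's note (illustrative sketch, not part of this patch): a typical
+ * caller of vmw_user_lookup_handle() checks which out-pointer was filled
+ * in. vmw_surface_unreference()/vmw_dmabuf_unreference() are assumed to
+ * be the matching put-functions declared in vmwgfx_drv.h.
+ */
+static int example_lookup(struct vmw_private *dev_priv,
+			  struct ttm_object_file *tfile, uint32_t handle)
+{
+	struct vmw_surface *surf = NULL;	/* must be NULL on entry */
+	struct vmw_dma_buffer *buf = NULL;	/* must be NULL on entry */
+	int ret;
+
+	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
+	if (ret != 0)
+		return ret;	/* handle named neither object type */
+
+	if (surf)
+		vmw_surface_unreference(&surf);	/* it was a surface */
+	else
+		vmw_dmabuf_unreference(&buf);	/* it was a dmabuf */
+	return 0;
+}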
@@ -1558,7 +373,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
- INIT_LIST_HEAD(&vmw_bo->validate_list);
+ INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
@@ -1590,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(user_bo == NULL)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->dma.base);
+ ret = ttm_base_object_init(tfile,
+ &user_bo->base,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_dma_buf = &user_bo->dma;
+ *handle = user_bo->base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_dma_buffer(bo);
+ return (vmw_user_bo->base.tfile == tfile ||
+ vmw_user_bo->base.shareable) ? 0 : -EPERM;
+}
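+
+/*
+ * Editor's note (illustrative sketch, not part of this patch): TTM calls a
+ * driver-supplied verify_access hook on mmap; a wrapper of roughly this
+ * shape (name hypothetical) would forward the check, assuming
+ * filp->private_data holds the struct drm_file pointer as usual for DRM
+ * file operations.
+ */
+static int example_verify_access(struct ttm_buffer_object *bo,
+				 struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+
+	return vmw_user_dmabuf_verify_access(bo, vmw_fpriv(file_priv)->tfile);
+}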
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1598,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
+ uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (unlikely(vmw_user_bo == NULL))
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0)) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &dma_buf);
if (unlikely(ret != 0))
goto out_no_dmabuf;
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0))
- goto out_no_base_object;
- else {
- rep->handle = vmw_user_bo->base.hash.key;
- rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
- rep->cur_gmr_id = vmw_user_bo->base.hash.key;
- rep->cur_gmr_offset = 0;
- }
+ rep->handle = handle;
+ rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_dmabuf_unreference(&dma_buf);
-out_no_base_object:
- ttm_bo_unref(&tmp);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
@@ -1653,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- if (likely(vmw_bo->on_validate_list))
- return vmw_bo->cur_validate_node;
-
- vmw_bo->cur_validate_node = cur_validate_node;
- vmw_bo->on_validate_list = true;
-
- return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_bo->on_validate_list = false;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -1702,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+
+ if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+ return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
+
/*
* Stream management
*/
@@ -1726,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &stream->res;
int ret;
- ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, false, res_free, NULL);
+ ret = vmw_resource_init(dev_priv, res, false, res_free,
+ &vmw_stream_func);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1749,10 +611,6 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
return 0;
}
-/**
- * User-space context management:
- */
-
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
@@ -1788,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+ res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1891,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource *res;
int ret;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+ *inout_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1986,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ bool interruptible)
+{
+ unsigned long size =
+ (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ struct vmw_dma_buffer *backup;
+ int ret;
+
+ if (likely(res->backup)) {
+ BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ return 0;
+ }
+
+ backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+ if (unlikely(backup == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ res->func->backup_placement,
+ interruptible,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out_no_dmabuf;
+
+ res->backup = backup;
+
+out_no_dmabuf:
+ return ret;
+}
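+
+/*
+ * Editor's note (illustrative): the rounding above is the usual
+ * power-of-two round-up idiom. E.g. with PAGE_SIZE == 4096, a
+ * backup_size of 5000 gives (5000 + 4095) & ~4095 == 8192, i.e. the
+ * backup buffer is sized to two whole pages.
+ */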
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ * @val_buf: Information about a buffer possibly
+ * containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ int ret = 0;
+ const struct vmw_res_func *func = res->func;
+
+ if (unlikely(res->id == -1)) {
+ ret = func->create(res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ if (func->bind &&
+ ((func->needs_backup && list_empty(&res->mob_head) &&
+ val_buf->bo != NULL) ||
+ (!func->needs_backup && val_buf->bo != NULL))) {
+ ret = func->bind(res, val_buf);
+ if (unlikely(ret != 0))
+ goto out_bind_failed;
+ if (func->needs_backup)
+ list_add_tail(&res->mob_head, &res->backup->res_list);
+ }
+
+ /*
+ * Only do this on write operations, and move to
+ * vmw_resource_unreserve if it can be called after
+ * backup buffers have been unreserved. Otherwise
+ * sort out locking.
+ */
+ res->res_dirty = true;
+
+ return 0;
+
+out_bind_failed:
+ func->destroy(res);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res: Pointer to the struct vmw_resource to unreserve.
+ * @new_backup: Pointer to new backup buffer if command submission
+ * switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (!list_empty(&res->lru_head))
+ return;
+
+ if (new_backup && new_backup != res->backup) {
+
+ if (res->backup) {
+ BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ res->backup = vmw_dmabuf_reference(new_backup);
+ BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ }
+ if (new_backup)
+ res->backup_offset = new_backup_offset;
+
+ if (!res->func->may_evict)
+ return;
+
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&res->lru_head,
+ &res->dev_priv->res_lru[res->func->res_type]);
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ * for a resource and in that case, allocate
+ * one, reserve and validate it.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ * @val_buf: On successful return contains data about the
+ * reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+ bool interruptible,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+ bool backup_dirty = false;
+ int ret;
+
+ if (unlikely(res->backup == NULL)) {
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf->bo = ttm_bo_reference(&res->backup->base);
+ list_add_tail(&val_buf->head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ if (res->func->needs_backup && list_empty(&res->mob_head))
+ return 0;
+
+ backup_dirty = res->backup_dirty;
+ ret = ttm_bo_validate(&res->backup->base,
+ res->func->backup_placement,
+ true, false);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf->bo);
+ if (backup_dirty)
+ vmw_dmabuf_unreference(&res->backup);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res: The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and make sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (res->func->needs_backup && res->backup == NULL &&
+ !no_backup) {
+ ret = vmw_resource_buf_alloc(res, true);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
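+
+/*
+ * Editor's note (much-simplified sketch, not part of this patch): the
+ * expected pairing of reserve/validate/unreserve around command
+ * submission. The real execbuf path also reserves and validates the
+ * backup buffer via vmw_resource_check_buffer(); that step is elided.
+ */
+static int example_submit(struct vmw_resource *res)
+{
+	int ret;
+
+	ret = vmw_resource_reserve(res, false);	/* off the LRU list */
+	if (ret != 0)
+		return ret;
+
+	ret = vmw_resource_validate(res);	/* create/bind on the device */
+	if (ret == 0) {
+		/* ... emit FIFO commands referencing res->id ... */
+	}
+
+	vmw_resource_unreserve(res, NULL, 0);	/* back on the LRU list */
+	return ret;
+}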
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ * backup buffer
+ *
+ * @val_buf: Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+
+ if (likely(val_buf->bo == NULL))
+ return;
+
+ INIT_LIST_HEAD(&val_list);
+ list_add_tail(&val_buf->head, &val_list);
+ ttm_eu_backoff_reservation(&val_list);
+ ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ * to a backup buffer.
+ *
+ * @res: The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+ struct ttm_validate_buffer val_buf;
+ const struct vmw_res_func *func = res->func;
+ int ret;
+
+ BUG_ON(!func->may_evict);
+
+ val_buf.bo = NULL;
+ ret = vmw_resource_check_buffer(res, true, &val_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(func->unbind != NULL &&
+ (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ ret = func->unbind(res, res->res_dirty, &val_buf);
+ if (unlikely(ret != 0))
+ goto out_no_unbind;
+ list_del_init(&res->mob_head);
+ }
+ ret = func->destroy(res);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+out_no_unbind:
+ vmw_resource_backoff_reservation(&val_buf);
+
+ return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+ int ret;
+ struct vmw_resource *evict_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+ struct ttm_validate_buffer val_buf;
+
+ if (likely(!res->func->may_evict))
+ return 0;
+
+ val_buf.bo = NULL;
+ if (res->backup)
+ val_buf.bo = &res->backup->base;
+ do {
+ ret = vmw_resource_do_validate(res, &val_buf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(lru_list) || !res->func->may_evict) {
+ DRM_ERROR("Out of device device id entries "
+ "for %s.\n", res->func->type_name);
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_res = vmw_resource_reference
+ (list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ else if (!res->func->needs_backup && res->backup) {
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ return 0;
+
+out_no_validate:
+ return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct vmw_fence_obj *old_fence_obj;
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL)
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ else
+ driver->sync_obj_ref(fence);
+
+ spin_lock(&bdev->fence_lock);
+
+ old_fence_obj = bo->sync_obj;
+ bo->sync_obj = fence;
+
+ spin_unlock(&bdev->fence_lock);
+
+ if (old_fence_obj)
+ vmw_fence_obj_unreference(&old_fence_obj);
+}
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res: The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+ return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @type: The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ enum vmw_res_type type)
+{
+ struct list_head *lru_list = &dev_priv->res_lru[type];
+ struct vmw_resource *evict_res;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+
+ if (list_empty(lru_list))
+ goto out_unlock;
+
+ evict_res = vmw_resource_reference(
+ list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+out_unlock:
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv: Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+ enum vmw_res_type type;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ for (type = 0; type < vmw_res_max; ++type)
+ vmw_resource_evict_type(dev_priv, type);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 000000000000..f3adeed2854c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+ enum ttm_object_type object_type;
+ struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+ void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type: Enum that identifies the lru list to use for eviction.
+ * @needs_backup: Whether the resource is guest-backed and needs
+ * persistent buffer storage.
+ * @type_name: String that identifies the resource type.
+ * @backup_placement: TTM placement for backup buffers.
+ * @may_evict: Whether the resource may be evicted.
+ * @create: Create a hardware resource.
+ * @destroy: Destroy a hardware resource.
+ * @bind: Bind a hardware resource to persistent buffer storage.
+ * @unbind: Unbind a hardware resource from persistent
+ * buffer storage.
+ */
+
+struct vmw_res_func {
+ enum vmw_res_type res_type;
+ bool needs_backup;
+ const char *type_name;
+ struct ttm_placement *backup_placement;
+ bool may_evict;
+
+ int (*create) (struct vmw_resource *res);
+ int (*destroy) (struct vmw_resource *res);
+ int (*bind) (struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+ int (*unbind) (struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+};
+
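+/*
+ * Editor's note (illustrative, hypothetical names): each resource type
+ * provides one static instance of struct vmw_res_func; see
+ * vmw_legacy_surface_func in vmwgfx_surface.c for an in-tree example.
+ * A minimal non-evictable type without backup storage might look like:
+ *
+ *	static int example_create(struct vmw_resource *res)  { return 0; }
+ *	static int example_destroy(struct vmw_resource *res) { return 0; }
+ *
+ *	static const struct vmw_res_func example_func = {
+ *		.res_type = vmw_res_stream,
+ *		.needs_backup = false,
+ *		.may_evict = false,
+ *		.type_name = "example resources",
+ *		.backup_placement = NULL,
+ *		.create = example_create,
+ *		.destroy = example_destroy,
+ *		.bind = NULL,
+ *		.unbind = NULL,
+ *	};
+ */
+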
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *));
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 60f12855362b..26387c3d5a21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 000000000000..582814339748
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base: The TTM base object handling user-space visibility.
+ * @srf: The surface metadata.
+ * @size: TTM accounting size for the surface.
+ * @backup_handle: Handle of the surface's backup buffer, if any.
+ */
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+ uint32_t size;
+ uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face: Surface face.
+ * @mip: Mip level.
+ * @bo_offset: Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+ uint32_t face;
+ uint32_t mip;
+ uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+ .object_type = VMW_RES_SURFACE,
+ .base_obj_to_res = vmw_user_surface_base_to_res,
+ .res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+ &user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = false,
+ .may_evict = true,
+ .type_name = "legacy surfaces",
+ .backup_placement = &vmw_srf_placement,
+ .create = &vmw_legacy_srf_create,
+ .destroy = &vmw_legacy_srf_destroy,
+ .bind = &vmw_legacy_srf_bind,
+ .unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+ SVGA3dCopyBox cb;
+ SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+ return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
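+
+/*
+ * Editor's note (illustrative): one struct vmw_surface_dma is emitted per
+ * mip level across all faces, so a cube-map surface (6 faces) with 3 mip
+ * levels per face needs 18 * sizeof(struct vmw_surface_dma) bytes of
+ * FIFO space.
+ */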
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+ return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+ return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+ void *cmd_space)
+{
+ struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+ cmd_space;
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = id;
+}
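+
+/*
+ * Editor's note (illustrative): encode helpers like the one above write
+ * into FIFO space obtained from vmw_fifo_reserve(), and the caller pairs
+ * that with a vmw_fifo_commit() of the same size:
+ *
+ *	cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ *	if (cmd != NULL) {
+ *		vmw_surface_destroy_encode(res->id, cmd);
+ *		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+ *	}
+ */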
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+ void *cmd_space)
+{
+ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+ cmd_space;
+ struct drm_vmw_size *src_size;
+ SVGA3dSize *cmd_size;
+ uint32_t cmd_len;
+ int i;
+
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = src_size->width;
+ cmd_size->height = src_size->height;
+ cmd_size->depth = src_size->depth;
+ }
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+ void *cmd_space,
+ const SVGAGuestPtr *ptr,
+ bool to_surface)
+{
+ uint32_t i;
+ struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+ const struct svga3d_surface_desc *desc =
+ svga3dsurface_get_desc(srf->format);
+
+ for (i = 0; i < srf->num_sizes; ++i) {
+ SVGA3dCmdHeader *header = &cmd->header;
+ SVGA3dCmdSurfaceDMA *body = &cmd->body;
+ SVGA3dCopyBox *cb = &cmd->cb;
+ SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+ const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+ const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+ header->id = SVGA_3D_CMD_SURFACE_DMA;
+ header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+ body->guest.ptr = *ptr;
+ body->guest.ptr.offset += cur_offset->bo_offset;
+ body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+ cur_size);
+ body->host.sid = srf->res.id;
+ body->host.face = cur_offset->face;
+ body->host.mipmap = cur_offset->mip;
+ body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM);
+ cb->x = 0;
+ cb->y = 0;
+ cb->z = 0;
+ cb->srcx = 0;
+ cb->srcy = 0;
+ cb->srcz = 0;
+ cb->w = cur_size->width;
+ cb->h = cur_size->height;
+ cb->d = cur_size->depth;
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset =
+ svga3dsurface_get_image_buffer_size(desc, cur_size,
+ body->guest.pitch);
+ suffix->flags.discard = 0;
+ suffix->flags.unsynchronized = 0;
+ suffix->flags.reserved = 0;
+ ++cmd;
+ }
+}
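+
+/*
+ * Editor's note (illustrative, assuming a 4-byte-per-pixel A8R8G8B8
+ * block layout): for a 100x64 mip level, svga3dsurface_calculate_pitch()
+ * would yield a guest.pitch of 100 * 4 = 400 bytes, and the copy box
+ * above spans the full width/height/depth of that mip level.
+ */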
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ void *cmd;
+
+ if (res->id != -1) {
+
+ cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+ /*
+ * used_memory_size_atomic, or separate lock
+ * to avoid taking dev_priv::cmdbuf_mutex in
+ * the destroy path.
+ */
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ srf = vmw_res_to_srf(res);
+ dev_priv->used_memory_size -= res->backup_size;
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id yet, allocate one and encode a
+ * surface define command into the FIFO.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ srf = vmw_res_to_srf(res);
+ if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ dev_priv->memory_size))
+ return -EBUSY;
+
+ /*
+ * Alloc id for the resource.
+ */
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ /*
+ * Encode the surface define command.
+ */
+
+ submit_size = vmw_surface_define_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ vmw_surface_define_encode(srf, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size += res->backup_size;
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ * @bind: Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf,
+ bool bind)
+{
+ SVGAGuestPtr ptr;
+ struct vmw_fence_obj *fence;
+ uint32_t submit_size;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint8_t *cmd;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ BUG_ON(val_buf->bo == NULL);
+
+ submit_size = vmw_surface_dma_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "DMA.\n");
+ return -ENOMEM;
+ }
+ vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ * surface validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (!res->backup_dirty)
+ return 0;
+
+ return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ * surface eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @readback: Whether to copy backup data back from the surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data from the surface.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (unlikely(readback))
+ return vmw_legacy_srf_dma(res, val_buf, false);
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ * resource eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+ BUG_ON(res->id == -1);
+
+ /*
+ * Encode the surface destroy command.
+ */
+
+ submit_size = vmw_surface_destroy_size();
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "eviction.\n");
+ return -ENOMEM;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size -= res->backup_size;
+
+ /*
+ * Release the surface ID.
+ */
+
+ vmw_resource_release_id(res);
+
+ return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to the struct vmw_surface to initialize.
+ * @res_free: Pointer to a resource destructor used to free
+ * the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_resource *res = &srf->res;
+
+ BUG_ON(res_free == NULL);
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_init(dev_priv, res, true, res_free,
+ &vmw_legacy_surface_func);
+
+ if (unlikely(ret != 0)) {
+ vmw_3d_resource_dec(dev_priv, false);
+ res_free(res);
+ return ret;
+ }
+
+ /*
+ * The surface won't be visible to hardware until a
+ * surface validate.
+ */
+
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ * user visible surfaces
+ *
+ * @base: Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
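+
+/*
+ * Editor's note (illustrative): this is the base_obj_to_res hook of
+ * user_surface_conv above. vmw_user_resource_lookup_handle() invokes it
+ * only after verifying base->object_type == VMW_RES_SURFACE, so the
+ * container_of() downcast here is safe.
+ */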
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res: A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_private *dev_priv = srf->res.dev_priv;
+ uint32_t size = user_srf->size;
+
+ kfree(srf->offsets);
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ ttm_base_object_kfree(user_srf, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base: Pointer to a pointer to a TTM base object
+ * embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and the
+ * pointer pointed to by *p_base is set to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ * the user surface destroy functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i, j;
+ uint32_t cur_bo_offset;
+ struct drm_vmw_size *cur_size;
+ struct vmw_surface_offset *cur_offset;
+ uint32_t num_sizes;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ num_sizes += req->mip_levels[i];
+
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+ ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+ ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ srf->scanout = req->scanout;
+
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = num_sizes;
+ user_srf->size = size;
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_sizes;
+ }
+ srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(srf->offsets == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_offsets;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+ ret = copy_from_user(srf->sizes, user_sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ srf->base_size = *srf->sizes;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = 1;
+
+ cur_bo_offset = 0;
+ cur_offset = srf->offsets;
+ cur_size = srf->sizes;
+
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ for (j = 0; j < srf->mip_levels[i]; ++j) {
+ uint32_t stride = svga3dsurface_calculate_pitch
+ (desc, cur_size);
+
+ cur_offset->face = i;
+ cur_offset->mip = j;
+ cur_offset->bo_offset = cur_bo_offset;
+ cur_bo_offset += svga3dsurface_get_image_buffer_size
+ (desc, cur_size, stride);
+ ++cur_offset;
+ ++cur_size;
+ }
+ }
+ res->backup_size = cur_bo_offset;
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ /* allocate the cursor snooper image and clear it */
+ srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+ if (!srf->snooper.image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ ret = -ENOMEM;
+ goto out_no_copy;
+ }
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->sid = user_srf->base.hash.key;
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_copy:
+ kfree(srf->offsets);
+out_no_offsets:
+ kfree(srf->sizes);
+out_no_sizes:
+ ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+ if (user_sizes)
+ ret = copy_to_user(user_sizes, srf->sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("copy_to_user failed %p %u\n",
+ user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index f676c01bb471..6fcd466d0825 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -46,9 +46,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[559] = 0x45;
}
/* the same as above (s/usage/physical/) */
- if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
- !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
- &rdesc[94], 4)) {
+ if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
+ rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
+ rdesc[97] == 0xff) {
rdesc[94] = 0x35;
rdesc[96] = 0x45;
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 17d15bb610d1..7c47fc3f7b2b 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -42,7 +42,6 @@ static struct cdev hidraw_cdev;
static struct class *hidraw_class;
static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
static DEFINE_MUTEX(minors_lock);
-static void drop_ref(struct hidraw *hid, int exists_bit);
static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
@@ -114,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
__u8 *buf;
int ret = 0;
- if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ if (!hidraw_table[minor]) {
ret = -ENODEV;
goto out;
}
@@ -262,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
}
mutex_lock(&minors_lock);
- if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ if (!hidraw_table[minor]) {
err = -ENODEV;
goto out_unlock;
}
@@ -299,12 +298,36 @@ out:
static int hidraw_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
+ struct hidraw *dev;
struct hidraw_list *list = file->private_data;
+ int ret;
+ int i;
+
+ mutex_lock(&minors_lock);
+ if (!hidraw_table[minor]) {
+ ret = -ENODEV;
+ goto unlock;
+ }
- drop_ref(hidraw_table[minor], 0);
list_del(&list->node);
+ dev = hidraw_table[minor];
+ if (!--dev->open) {
+ if (dev->exist) {
+ hid_hw_power(dev->hid, PM_HINT_NORMAL);
+ hid_hw_close(dev->hid);
+ } else {
+ kfree(dev);
+ }
+ }
+
+ for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
+ kfree(list->buffer[i].value);
kfree(list);
- return 0;
+ ret = 0;
+unlock:
+ mutex_unlock(&minors_lock);
+
+ return ret;
}
static long hidraw_ioctl(struct file *file, unsigned int cmd,
@@ -506,7 +529,21 @@ EXPORT_SYMBOL_GPL(hidraw_connect);
void hidraw_disconnect(struct hid_device *hid)
{
struct hidraw *hidraw = hid->hidraw;
- drop_ref(hidraw, 1);
+
+ mutex_lock(&minors_lock);
+ hidraw->exist = 0;
+
+ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+
+ hidraw_table[hidraw->minor] = NULL;
+
+ if (hidraw->open) {
+ hid_hw_close(hid);
+ wake_up_interruptible(&hidraw->wait);
+ } else {
+ kfree(hidraw);
+ }
+ mutex_unlock(&minors_lock);
}
EXPORT_SYMBOL_GPL(hidraw_disconnect);
@@ -555,23 +592,3 @@ void hidraw_exit(void)
unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
}
-
-static void drop_ref(struct hidraw *hidraw, int exists_bit)
-{
- mutex_lock(&minors_lock);
- if (exists_bit) {
- hid_hw_close(hidraw->hid);
- hidraw->exist = 0;
- if (hidraw->open)
- wake_up_interruptible(&hidraw->wait);
- } else {
- --hidraw->open;
- }
-
- if (!hidraw->open && !hidraw->exist) {
- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
- hidraw_table[hidraw->minor] = NULL;
- kfree(hidraw);
- }
- mutex_unlock(&minors_lock);
-}
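
The hidraw rework above deletes drop_ref() and open-codes its two halves at the call sites, so each path can act correctly under minors_lock: the last release() frees the object only if the hardware is already gone, while disconnect() frees it only if nobody still has it open, and release() additionally kfree()s any queued report buffers. A hedged userspace sketch of that ownership rule (struct and function names are stand-ins, a pthread mutex in place of the kernel mutex):

#include <pthread.h>
#include <stdlib.h>

struct dev {
	int open;	/* userspace openers */
	int exist;	/* backing hardware still present? */
};

static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;

static void dev_release(struct dev *d)		/* file close path */
{
	pthread_mutex_lock(&minors_lock);
	if (!--d->open && !d->exist)
		free(d);	/* device already unplugged: closer frees */
	pthread_mutex_unlock(&minors_lock);
}

static void dev_disconnect(struct dev *d)	/* hotplug removal path */
{
	pthread_mutex_lock(&minors_lock);
	d->exist = 0;
	if (!d->open)
		free(d);	/* nobody has it open: remover frees */
	pthread_mutex_unlock(&minors_lock);
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->open = 1;
	d->exist = 1;
	dev_disconnect(d);	/* still open: not freed yet */
	dev_release(d);		/* last close frees it */
	return 0;
}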
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index a227be47149f..520e5bf4f76d 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -32,7 +32,7 @@
* ASB100-A supports pwm1, while plain ASB100 does not. There is no known
* way for the driver to tell which one is there.
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* asb100 7 3 1 4 0x31 0x0694 yes no
*/
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 1821b7423d5b..de3c7e04c3b5 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -2083,6 +2083,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
mutex_init(&data->lock);
mutex_init(&data->update_lock);
data->name = w83627ehf_device_names[sio_data->kind];
+ data->bank = 0xff; /* Force initial bank selection */
platform_set_drvdata(pdev, data);
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
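
The single added line seeds the driver's bank cache with 0xff. These chips multiplex registers behind a bank-select register, and the driver skips the bank-select write whenever its cached value already matches; starting the cache at a value no real bank can take forces the first access to program the chip instead of trusting whatever the BIOS left behind. Illustrative sketch (the register access is reduced to a printf):

#include <stdint.h>
#include <stdio.h>

static uint8_t cached_bank = 0xff;	/* no real bank is ever 0xff */

static void chip_select_bank(uint8_t bank)
{
	printf("hw write: select bank %d\n", bank);	/* slow I/O */
}

static void set_bank(uint8_t bank)
{
	if (cached_bank != bank) {	/* skip hw when cache matches */
		chip_select_bank(bank);
		cached_bank = bank;
	}
}

int main(void)
{
	set_bank(0);	/* the 0xff seed guarantees this reaches the chip */
	set_bank(0);	/* cached: no hardware access */
	return 0;
}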
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 5b1a6a666441..af1589908709 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -25,7 +25,7 @@
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC)
* w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC)
* w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC)
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 5a5046d94c3e..20f11d31da40 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -24,7 +24,7 @@
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* as99127f 7 3 0 3 0x31 0x12c3 yes no
* as99127f rev.2 (type_name = as99127f) 0x31 0x5ca3 yes no
* w83781d 7 3 0 3 0x10-1 0x5ca3 yes yes
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 39ab7bcc616e..ed397c645198 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -22,7 +22,7 @@
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83791d 10 5 5 3 0x71 0x5ca3 yes no
*
* The w83791d chip appears to be part way between the 83781d and the
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 053645279f38..301942d08453 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -31,7 +31,7 @@
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83792d 9 7 7 3 0x7a 0x5ca3 yes no
*/
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index f0e8286c3c70..79710bcac2f7 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -20,7 +20,7 @@
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83l786ng 3 2 2 2 0x7b 0x5ca3 yes no
*/
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index aa59a254be2c..c02bf208084f 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -39,6 +39,7 @@
#define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */
#define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */
#define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */
+#define AT91_TWI_QUICK 0x0040 /* SMBus quick command */
#define AT91_TWI_SWRST 0x0080 /* Software Reset */
#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
@@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
INIT_COMPLETION(dev->cmd_complete);
dev->transfer_status = 0;
- if (dev->msg->flags & I2C_M_RD) {
+
+ if (!dev->buf_len) {
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ } else if (dev->msg->flags & I2C_M_RD) {
unsigned start_flags = AT91_TWI_START;
if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
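
The new AT91_TWI_QUICK branch handles zero-length messages, which on the wire are SMBus Quick commands: only the address byte and R/W bit are transferred, the probe mechanism i2cdetect relies on. The transfer setup thus becomes a three-way dispatch; a runnable caricature with placeholder actions:

#include <stdio.h>

#define I2C_M_RD 0x0001

/* Placeholders for the controller-register writes in the driver. */
static void start_quick(void) { puts("quick: address byte only"); }
static void start_read(void)  { puts("read transfer"); }
static void start_write(void) { puts("write transfer"); }

static void start_transfer(unsigned int len, unsigned int flags)
{
	if (len == 0)
		start_quick();		/* SMBus Quick probe */
	else if (flags & I2C_M_RD)
		start_read();
	else
		start_write();
}

int main(void)
{
	start_transfer(0, 0);		/* what i2cdetect generates */
	start_transfer(2, I2C_M_RD);
	return 0;
}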
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 286ca1917820..0670da79ee5e 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -287,12 +287,14 @@ read_init_dma_fail:
select_init_dma_fail:
dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
select_init_pio_fail:
+ dmaengine_terminate_all(i2c->dmach);
return -EINVAL;
/* Write failpath. */
write_init_dma_fail:
dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
write_init_pio_fail:
+ dmaengine_terminate_all(i2c->dmach);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index db31eaed6ea5..3525c9e62cb0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
@@ -187,8 +186,9 @@ struct omap_i2c_dev {
int reg_shift; /* bit shift for I2C register addresses */
struct completion cmd_complete;
struct resource *ioarea;
- u32 latency; /* maximum MPU wkup latency */
- struct pm_qos_request pm_qos_request;
+ u32 latency; /* maximum mpu wkup latency */
+ void (*set_mpu_wkup_lat)(struct device *dev,
+ long latency);
u32 speed; /* Speed of bus in kHz */
u32 dtrev; /* extra revision from DT */
u32 flags;
@@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->threshold) /
+ (1000 * dev->speed / 8);
}
/*
@@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
dev->buf = msg->buf;
dev->buf_len = msg->len;
+ /* make sure writes to dev->buf_len are ordered */
+ barrier();
+
omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
/* Clear the FIFO Buffers */
@@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
*/
timeout = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
- dev->buf_len = 0;
if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
omap_i2c_init(dev);
@@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (r < 0)
goto out;
- /*
- * When waiting for completion of a i2c transfer, we need to
- * set a wake up latency constraint for the MPU. This is to
- * ensure quick enough wakeup from idle, when transfer
- * completes.
- */
- if (dev->latency)
- pm_qos_add_request(&dev->pm_qos_request,
- PM_QOS_CPU_DMA_LATENCY,
- dev->latency);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
for (i = 0; i < num; i++) {
r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
break;
}
- if (dev->latency)
- pm_qos_remove_request(&dev->pm_qos_request);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
if (r == 0)
r = num;
@@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
} else if (pdata != NULL) {
dev->speed = pdata->clkrate;
dev->flags = pdata->flags;
+ dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
dev->dtrev = pdata->rev;
}
@@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- dev->latency = (1000000 * dev->fifo_size) /
- (1000 * dev->speed / 8);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->fifo_size) /
+ (1000 * dev->speed / 8);
}
/* reset ASAP, clearing any IRQs */
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3e0335f1fc60..9d902725bac9 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
goto free_gpio;
}
+ i2c->gpios[idx] = gpio;
ret = gpio_request(gpio, "i2c-bus");
if (ret) {
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 5f097f309b9f..7fa5b24b16db 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -169,7 +169,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
mux->busses = devm_kzalloc(&pdev->dev,
sizeof(mux->busses) * mux->pdata->bus_count,
GFP_KERNEL);
- if (!mux->states) {
+ if (!mux->busses) {
dev_err(&pdev->dev, "Cannot allocate busses\n");
ret = -ENOMEM;
goto err;
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index c0ec7d42c3be..1abbc170d8b7 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
* input_mt_init_slots() - initialize MT input slots
* @dev: input device supporting MT events and finger tracking
* @num_slots: number of slots used by the device
+ * @flags: mt tasks to handle in core
*
* This function allocates all necessary memory for MT slot handling
* in the input device, prepares the ABS_MT_SLOT and
* ABS_MT_TRACKING_ID events for use and sets up appropriate buffers.
+ * Depending on the flags set, it also performs pointer emulation and
+ * frame synchronization.
+ *
* May be called repeatedly. Returns -EINVAL if attempting to
* reinitialize with a different number of slots.
*/
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 8f02e3d0e712..4c842c320c2e 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -12,8 +12,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MOUSEDEV_MINOR_BASE 32
-#define MOUSEDEV_MINORS 32
-#define MOUSEDEV_MIX 31
+#define MOUSEDEV_MINORS 31
+#define MOUSEDEV_MIX 63
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index f02028ec3db6..78e5d9ab0ba7 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
-static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
+static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+ struct ads7846 *ts)
{
struct ads7846_platform_data *pdata = spi->dev.platform_data;
int err;
@@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
ts->gpio_pendown = pdata->gpio_pendown;
+ if (pdata->gpio_pendown_debounce)
+ gpio_set_debounce(pdata->gpio_pendown,
+ pdata->gpio_pendown_debounce);
} else {
dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
return -EINVAL;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d4a4cd445cab..0badfa48b32b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
static int intel_iommu_add_device(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_dev *bridge, *dma_pdev;
+ struct pci_dev *bridge, *dma_pdev = NULL;
struct iommu_group *group;
int ret;
@@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev)
dma_pdev = pci_get_domain_bus_and_slot(
pci_domain_nr(pdev->bus),
bridge->subordinate->number, 0);
- else
+ if (!dma_pdev)
dma_pdev = pci_dev_get(bridge);
} else
dma_pdev = pci_dev_get(pdev);
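
Two coupled changes above: dma_pdev now starts as NULL, and the else becomes if (!dma_pdev). That converts "lookup A or lookup B" into "try A where it applies, fall back to B whenever A produced nothing", which also covers pci_get_domain_bus_and_slot() itself failing. The shape, with hypothetical stand-ins:

#include <stdio.h>

struct pci_dev { const char *name; };

/* Hypothetical stand-in: the specific lookup may legitimately fail. */
static struct pci_dev *lookup_behind_bridge(int ok)
{
	static struct pci_dev fn0 = { "function 0 behind bridge" };

	return ok ? &fn0 : NULL;
}

static struct pci_dev *take_ref(struct pci_dev *p) { return p; }

static struct pci_dev *pick_dma_alias(struct pci_dev *bridge, int pcie, int ok)
{
	struct pci_dev *dma_pdev = NULL;	/* must start NULL */

	if (pcie)
		dma_pdev = lookup_behind_bridge(ok);
	if (!dma_pdev)			/* lookup skipped *or* failed */
		dma_pdev = take_ref(bridge);
	return dma_pdev;
}

int main(void)
{
	struct pci_dev bridge = { "the bridge itself" };

	puts(pick_dma_alias(&bridge, 1, 0)->name);	/* fallback path */
	return 0;
}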
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index a649f146d17b..c0f7a4266263 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
stats[i], val, offs);
}
seq_printf(s, "\n");
+ dput(dent);
return 0;
}
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index dc670ccc6978..16c78f1c5ef2 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -168,7 +168,8 @@ static int __init armctrl_of_init(struct device_node *node,
}
static struct of_device_id irq_of_match[] __initconst = {
- { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }
+ { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
+ { }
};
void __init bcm2835_init_irq(void)
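
The added { } is the fix: tables like of_device_id[] are walked until an all-zero sentinel entry, so a table without one sends the matcher off the end of the array. A userspace illustration of the sentinel convention:

#include <stdio.h>
#include <string.h>

struct match { const char *compatible; };

static const struct match table[] = {
	{ .compatible = "brcm,bcm2835-armctrl-ic" },
	{ }	/* all-zero sentinel: terminates the walk below */
};

static const struct match *find(const char *compat)
{
	const struct match *m;

	for (m = table; m->compatible; m++)	/* stops at the sentinel */
		if (!strcmp(m->compatible, compat))
			return m;
	return NULL;
}

int main(void)
{
	printf("%s\n", find("no,such-device") ? "hit" : "miss");
	return 0;
}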
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index a233ed53913a..86cd75a0e84d 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -4,7 +4,7 @@
menuconfig ISDN
bool "ISDN support"
- depends on NET
+ depends on NET && NETDEVICES
depends on !S390 && !UML
---help---
ISDN ("Integrated Services Digital Network", called RNIS in France)
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 2302fbe70ac6..9c6650ea848e 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -6,7 +6,7 @@ if ISDN_I4L
config ISDN_PPP
bool "Support synchronous PPP"
- depends on INET && NETDEVICES
+ depends on INET
select SLHC
help
Over digital connections such as ISDN, there is no need to
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 8c610fa6782b..e2a945ee9f05 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1312,7 +1312,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
} else
return -EINVAL;
break;
-#ifdef CONFIG_NETDEVICES
case IIOCNETGPN:
/* Get peer phone number of a connected
* isdn network interface */
@@ -1322,7 +1321,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
return isdn_net_getpeer(&phone, argp);
} else
return -EINVAL;
-#endif
default:
return -EINVAL;
}
@@ -1352,7 +1350,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
case IIOCNETLCR:
printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n");
return -ENODEV;
-#ifdef CONFIG_NETDEVICES
case IIOCNETAIF:
/* Add a network-interface */
if (arg) {
@@ -1491,7 +1488,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
return -EFAULT;
return isdn_net_force_hangup(name);
break;
-#endif /* CONFIG_NETDEVICES */
case IIOCSETVER:
dev->net_verbose = arg;
printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose);
diff --git a/drivers/leds/ledtrig-cpu.c b/drivers/leds/ledtrig-cpu.c
index b312056da14d..4239b3955ff0 100644
--- a/drivers/leds/ledtrig-cpu.c
+++ b/drivers/leds/ledtrig-cpu.c
@@ -33,8 +33,6 @@
struct led_trigger_cpu {
char name[MAX_NAME_LEN];
struct led_trigger *_trig;
- struct mutex lock;
- int lock_is_inited;
};
static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
@@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
{
struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig);
- /* mutex lock should be initialized before calling mutex_call() */
- if (!trig->lock_is_inited)
- return;
-
- mutex_lock(&trig->lock);
-
/* Locate the correct CPU LED */
switch (ledevt) {
case CPU_LED_IDLE_END:
@@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
/* Will leave the LED as it is */
break;
}
-
- mutex_unlock(&trig->lock);
}
EXPORT_SYMBOL(ledtrig_cpu);
@@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
- mutex_init(&trig->lock);
-
snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
- mutex_lock(&trig->lock);
led_trigger_register_simple(trig->name, &trig->_trig);
- trig->lock_is_inited = 1;
- mutex_unlock(&trig->lock);
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
@@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
- mutex_lock(&trig->lock);
-
led_trigger_unregister_simple(trig->_trig);
trig->_trig = NULL;
memset(trig->name, 0, MAX_NAME_LEN);
- trig->lock_is_inited = 0;
-
- mutex_unlock(&trig->lock);
- mutex_destroy(&trig->lock);
}
unregister_syscore_ops(&ledtrig_cpu_syscore_ops);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 02db9183ca01..77e6eff41cae 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
if (!md_in_flight(md))
wake_up(&md->wait);
+ /*
+ * Run this off this callpath, as drivers could invoke end_io while
+ * inside their request_fn (and holding the queue lock). Calling
+ * back into ->request_fn() could deadlock attempting to grab the
+ * queue lock again.
+ */
if (run_queue)
- blk_run_queue(md->queue);
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
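
blk_run_queue() re-enters the driver's ->request_fn() synchronously, and rq_completed() can already be executing from inside ->request_fn() with the non-recursive queue lock held; blk_run_queue_async() defers the rerun to a worker, breaking the recursion. A userspace caricature of why the synchronous call self-deadlocks, with a default (non-recursive) pthread mutex standing in for the queue lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int run_pending;

/* Completion can be reached from dispatch, i.e. with queue_lock held.
 * Locking queue_lock here again would hang forever, so only record
 * that the queue needs another run. */
static void complete_request(void)
{
	run_pending = 1;	/* defer: the "async" part of the fix */
}

static void dispatch(void)
{
	pthread_mutex_lock(&queue_lock);
	complete_request();	/* driver finished a request inline */
	pthread_mutex_unlock(&queue_lock);

	while (run_pending) {	/* rerun outside the critical section */
		run_pending = 0;
		pthread_mutex_lock(&queue_lock);
		puts("queue runs again, lock re-taken safely");
		pthread_mutex_unlock(&queue_lock);
	}
}

int main(void)
{
	dispatch();
	return 0;
}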
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9ab768acfb62..61200717687b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1817,10 +1817,10 @@ retry:
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
- u64 internal_bb = *p++;
+ u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
- *bbp++ = cpu_to_le64(store_bb);
+ bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
@@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(md_stop_writes);
-void md_stop(struct mddev *mddev)
+static void __md_stop(struct mddev *mddev)
{
mddev->ready = 0;
mddev->pers->stop(mddev);
@@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev)
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
+
+void md_stop(struct mddev *mddev)
+{
+ /* stop the array and free any attached data structures.
+ * This is called from dm-raid.
+ */
+ __md_stop(mddev);
+ bitmap_destroy(mddev);
+ if (mddev->bio_set)
+ bioset_free(mddev->bio_set);
+}
+
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
@@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode,
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
- md_stop(mddev);
+ __md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
@@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
int hi;
- int lo = 0;
+ int lo;
u64 *p = bb->page;
- int rv = 0;
+ int rv;
sector_t target = s + sectors;
unsigned seq;
@@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
retry:
seq = read_seqbegin(&bb->lock);
-
+ lo = 0;
+ rv = 0;
hi = bb->count;
/* Binary search between lo and hi for 'target'
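
The md_is_badblock() hunk moves lo = 0 and rv = 0 below the retry label: everything a seqlock reader computes must be recomputed when read_seqretry() reports a raced writer, starting values included, or the second pass begins from the first pass's half-finished state. A self-contained emulation of the retry discipline (the toy sequence counter below approximates the kernel's seqlock, minus memory-ordering details):

#include <stdatomic.h>
#include <stdio.h>

/* Toy lockless-reader sequence counter: odd means a writer is mid-
 * update, a changed value means the reader raced one and must retry. */
static atomic_uint seqcount;

static unsigned int read_begin(void)
{
	unsigned int s;

	while ((s = atomic_load(&seqcount)) & 1)
		;	/* writer in progress: spin */
	return s;
}

static int read_retry(unsigned int s)
{
	return atomic_load(&seqcount) != s;
}

static int contains(const long *p, int count, long target)
{
	unsigned int seq;
	int lo, hi, rv;

	do {
		seq = read_begin();
		lo = 0;		/* the fix: reset on *every* attempt */
		rv = 0;
		hi = count;
		while (hi - lo > 1) {
			int mid = (lo + hi) / 2;

			if (p[mid] <= target)
				lo = mid;
			else
				hi = mid;
		}
		if (p[lo] == target)
			rv = 1;
	} while (read_retry(seq));

	return rv;
}

int main(void)
{
	static const long bb[] = { 1, 4, 9, 16, 25 };

	printf("%d %d\n", contains(bb, 5, 9), contains(bb, 5, 10));
	return 0;
}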
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d1295aff4173..0d5d0ff2c0f7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
*/
one_write_done(r10_bio);
if (dec_rdev)
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
}
/*
@@ -1334,18 +1334,21 @@ retry_write:
blocked_rdev = rrdev;
break;
}
+ if (rdev && (test_bit(Faulty, &rdev->flags)
+ || test_bit(Unmerged, &rdev->flags)))
+ rdev = NULL;
if (rrdev && (test_bit(Faulty, &rrdev->flags)
|| test_bit(Unmerged, &rrdev->flags)))
rrdev = NULL;
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags)) {
+
+ if (!rdev && !rrdev) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
}
- if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
int bad_sectors;
@@ -1387,8 +1390,10 @@ retry_write:
max_sectors = good_sectors;
}
}
- r10_bio->devs[i].bio = bio;
- atomic_inc(&rdev->nr_pending);
+ if (rdev) {
+ r10_bio->devs[i].bio = bio;
+ atomic_inc(&rdev->nr_pending);
+ }
if (rrdev) {
r10_bio->devs[i].repl_bio = bio;
atomic_inc(&rrdev->nr_pending);
@@ -1444,69 +1449,71 @@ retry_write:
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
- if (!r10_bio->devs[i].bio)
- continue;
+ if (r10_bio->devs[i].bio) {
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ choose_data_offset(r10_bio,
+ rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_private = r10_bio;
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].bio = mbio;
+ atomic_inc(&r10_bio->remaining);
- mbio->bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio,
- conf->mirrors[d].rdev));
- mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
- mbio->bi_private = r10_bio;
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!plug)
+ md_wakeup_thread(mddev->thread);
+ }
- atomic_inc(&r10_bio->remaining);
+ if (r10_bio->devs[i].repl_bio) {
+ struct md_rdev *rdev = conf->mirrors[d].replacement;
+ if (rdev == NULL) {
+ /* Replacement just got moved to main 'rdev' */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr +
+ choose_data_offset(
+ r10_bio, rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_private = r10_bio;
- cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
- if (cb)
- plug = container_of(cb, struct raid10_plug_cb, cb);
- else
- plug = NULL;
- spin_lock_irqsave(&conf->device_lock, flags);
- if (plug) {
- bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
- } else {
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!plug)
- md_wakeup_thread(mddev->thread);
-
- if (!r10_bio->devs[i].repl_bio)
- continue;
-
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].repl_bio = mbio;
-
- /* We are actively writing to the original device
- * so it cannot disappear, so the replacement cannot
- * become NULL here
- */
- mbio->bi_sector = (r10_bio->devs[i].addr +
- choose_data_offset(
- r10_bio,
- conf->mirrors[d].replacement));
- mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
- mbio->bi_private = r10_bio;
-
- atomic_inc(&r10_bio->remaining);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
- md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c5439dce0295..a4502686e7a8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf,
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) &&
(test_bit(R5_UPTODATE, &dev->flags) ||
- test_and_clear_bit(R5_Discard, &dev->flags))) {
+ test_bit(R5_Discard, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
pr_debug("Return write for disc %d\n", i);
+ if (test_and_clear_bit(R5_Discard, &dev->flags))
+ clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_sector <
@@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
}
- }
+ } else if (test_bit(R5_Discard, &sh->dev[i].flags))
+ clear_bit(R5_Discard, &sh->dev[i].flags);
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh)
handle_failed_sync(conf, sh, &s);
}
- /*
- * might be able to return some write requests if the parity blocks
- * are safe, or on a failed drive
- */
- pdev = &sh->dev[sh->pd_idx];
- s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
- || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
- qdev = &sh->dev[sh->qd_idx];
- s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
- || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
- || conf->level < 6;
-
- if (s.written &&
- (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
- && !test_bit(R5_LOCKED, &pdev->flags)
- && (test_bit(R5_UPTODATE, &pdev->flags) ||
- test_bit(R5_Discard, &pdev->flags))))) &&
- (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
- && !test_bit(R5_LOCKED, &qdev->flags)
- && (test_bit(R5_UPTODATE, &qdev->flags) ||
- test_bit(R5_Discard, &qdev->flags))))))
- handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
-
- /* Now we might consider reading some blocks, either to check/generate
- * parity, or to satisfy requests
- * or to load a block that is being partially written.
- */
- if (s.to_read || s.non_overwrite
- || (conf->level == 6 && s.to_write && s.failed)
- || (s.syncing && (s.uptodate + s.compute < disks))
- || s.replacing
- || s.expanding)
- handle_stripe_fill(sh, &s, disks);
-
/* Now we check to see if any write operations have recently
* completed
*/
@@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh)
s.dec_preread_active = 1;
}
+ /*
+ * might be able to return some write requests if the parity blocks
+ * are safe, or on a failed drive
+ */
+ pdev = &sh->dev[sh->pd_idx];
+ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+ qdev = &sh->dev[sh->qd_idx];
+ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+ || conf->level < 6;
+
+ if (s.written &&
+ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ && !test_bit(R5_LOCKED, &pdev->flags)
+ && (test_bit(R5_UPTODATE, &pdev->flags) ||
+ test_bit(R5_Discard, &pdev->flags))))) &&
+ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ && !test_bit(R5_LOCKED, &qdev->flags)
+ && (test_bit(R5_UPTODATE, &qdev->flags) ||
+ test_bit(R5_Discard, &qdev->flags))))))
+ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+
+ /* Now we might consider reading some blocks, either to check/generate
+ * parity, or to satisfy requests
+ * or to load a block that is being partially written.
+ */
+ if (s.to_read || s.non_overwrite
+ || (conf->level == 6 && s.to_write && s.failed)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
+ handle_stripe_fill(sh, &s, disks);
+
/* Now to consider new write requests and what else, if anything
* should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
@@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev)
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
+ /* Round up to power of 2, as discard handling
+ * currently assumes that */
+ while ((stripe-1) & stripe)
+ stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
/*
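
The two added lines round stripe up to the next power of two: (x - 1) & x is nonzero exactly while x has more than one bit set, and (x | (x - 1)) + 1 equals x + (x & -x), carrying the lowest set bit into the next bit up; repeating until a single bit remains lands on the next power of two at or above the starting value. Standalone check:

#include <stdio.h>

static unsigned long roundup_pow2(unsigned long x)
{
	while ((x - 1) & x)		/* >1 bit set: not a power of 2 */
		x = (x | (x - 1)) + 1;	/* == x + (x & -x): carry the
					 * lowest set bit upward */
	return x;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       roundup_pow2(4096),	/* already 2^12 -> 4096 */
	       roundup_pow2(3 * 4096),	/* 12288 -> 16384 */
	       roundup_pow2(5 * 4096));	/* 20480 -> 32768 */
	return 0;
}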
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 660bbc528862..4d50da618166 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -208,7 +208,7 @@ static unsigned long exynos5250_dwmmc_caps[4] = {
MMC_CAP_CMD23,
};
-static struct dw_mci_drv_data exynos5250_drv_data = {
+static const struct dw_mci_drv_data exynos5250_drv_data = {
.caps = exynos5250_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
.setup_clock = dw_mci_exynos_setup_clock,
@@ -220,14 +220,14 @@ static struct dw_mci_drv_data exynos5250_drv_data = {
static const struct of_device_id dw_mci_exynos_match[] = {
{ .compatible = "samsung,exynos5250-dw-mshc",
- .data = (void *)&exynos5250_drv_data, },
+ .data = &exynos5250_drv_data, },
{},
};
-MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
+MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
int dw_mci_exynos_probe(struct platform_device *pdev)
{
- struct dw_mci_drv_data *drv_data;
+ const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c960ca7ffbe6..917936bee5d5 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -24,7 +24,7 @@
#include "dw_mmc.h"
int dw_mci_pltfm_register(struct platform_device *pdev,
- struct dw_mci_drv_data *drv_data)
+ const struct dw_mci_drv_data *drv_data)
{
struct dw_mci *host;
struct resource *regs;
@@ -50,8 +50,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
if (!host->regs)
return -ENOMEM;
- if (host->drv_data->init) {
- ret = host->drv_data->init(host);
+ if (drv_data && drv_data->init) {
+ ret = drv_data->init(host);
if (ret)
return ret;
}
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h
index 301f24541fc2..2ac37b81de4d 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.h
+++ b/drivers/mmc/host/dw_mmc-pltfm.h
@@ -13,7 +13,7 @@
#define _DW_MMC_PLTFM_H_
extern int dw_mci_pltfm_register(struct platform_device *pdev,
- struct dw_mci_drv_data *drv_data);
+ const struct dw_mci_drv_data *drv_data);
extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev);
extern const struct dev_pm_ops dw_mci_pltfm_pmops;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index c2828f35c3b8..c0667c8af2bd 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -232,6 +232,7 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
cmd->error = -EINPROGRESS;
@@ -261,8 +262,8 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
cmdr |= SDMMC_CMD_DAT_WR;
}
- if (slot->host->drv_data->prepare_command)
- slot->host->drv_data->prepare_command(slot->host, &cmdr);
+ if (drv_data && drv_data->prepare_command)
+ drv_data->prepare_command(slot->host, &cmdr);
return cmdr;
}
@@ -434,7 +435,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
return 0;
}
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
.stop = dw_mci_idmac_stop_dma,
@@ -772,6 +773,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 regs;
/* set default 1 bit mode */
@@ -807,8 +809,8 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
slot->clock = ios->clock;
}
- if (slot->host->drv_data->set_ios)
- slot->host->drv_data->set_ios(slot->host, ios);
+ if (drv_data && drv_data->set_ios)
+ drv_data->set_ios(slot->host, ios);
switch (ios->power_mode) {
case MMC_POWER_UP:
@@ -1815,6 +1817,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
+ struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
u8 bus_width;
@@ -1854,8 +1857,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
} else {
ctrl_id = to_platform_device(host->dev)->id;
}
- if (host->drv_data && host->drv_data->caps)
- mmc->caps |= host->drv_data->caps[ctrl_id];
+ if (drv_data && drv_data->caps)
+ mmc->caps |= drv_data->caps[ctrl_id];
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
@@ -1867,10 +1870,10 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
else
bus_width = 1;
- if (host->drv_data->setup_bus) {
+ if (drv_data && drv_data->setup_bus) {
struct device_node *slot_np;
slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
- ret = host->drv_data->setup_bus(host, slot_np, bus_width);
+ ret = drv_data->setup_bus(host, slot_np, bus_width);
if (ret)
goto err_setup_bus;
}
@@ -1968,7 +1971,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
- dev_info(&host->dev, "Using internal DMA controller.\n");
+ dev_info(host->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
@@ -2035,6 +2038,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
struct dw_mci_board *pdata;
struct device *dev = host->dev;
struct device_node *np = dev->of_node;
+ struct dw_mci_drv_data *drv_data = host->drv_data;
int idx, ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -2062,8 +2066,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
- if (host->drv_data->parse_dt) {
- ret = host->drv_data->parse_dt(host);
+ if (drv_data && drv_data->parse_dt) {
+ ret = drv_data->parse_dt(host);
if (ret)
return ERR_PTR(ret);
}
@@ -2080,6 +2084,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
int dw_mci_probe(struct dw_mci *host)
{
+ struct dw_mci_drv_data *drv_data = host->drv_data;
int width, i, ret = 0;
u32 fifo_size;
int init_slots = 0;
@@ -2127,8 +2132,8 @@ int dw_mci_probe(struct dw_mci *host)
else
host->bus_hz = clk_get_rate(host->ciu_clk);
- if (host->drv_data->setup_clock) {
- ret = host->drv_data->setup_clock(host);
+ if (drv_data && drv_data->setup_clock) {
+ ret = drv_data->setup_clock(host);
if (ret) {
dev_err(host->dev,
"implementation specific clock setup failed\n");
@@ -2228,6 +2233,21 @@ int dw_mci_probe(struct dw_mci *host)
else
host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+ /*
+ * Enable interrupts for command done, data over, data empty, card det,
+ * receive ready and error such as transmit, receive timeout, crc error
+ */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+ dev_info(host->dev, "DW MMC controller at irq %d, "
+ "%d bit host data width, "
+ "%u deep fifo\n",
+ host->irq, width, fifo_size);
+
/* We need at least one slot to succeed */
for (i = 0; i < host->num_slots; i++) {
ret = dw_mci_init_slot(host, i);
@@ -2257,20 +2277,6 @@ int dw_mci_probe(struct dw_mci *host)
else
host->data_offset = DATA_240A_OFFSET;
- /*
- * Enable interrupts for command done, data over, data empty, card det,
- * receive ready and error such as transmit, receive timeout, crc error
- */
- mci_writel(host, RINTSTS, 0xFFFFFFFF);
- mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
- SDMMC_INT_TXDR | SDMMC_INT_RXDR |
- DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
- mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
-
- dev_info(host->dev, "DW MMC controller at irq %d, "
- "%d bit host data width, "
- "%u deep fifo\n",
- host->irq, width, fifo_size);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
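
All of the dw_mmc changes above follow one pattern: host->drv_data is an optional per-SoC ops table that is NULL on the generic platform, so each site loads it into a local once and guards with drv_data && drv_data->hook instead of dereferencing unconditionally. A minimal sketch of the idiom (types and hook names are illustrative):

#include <stdio.h>

struct drv_ops {
	int (*init)(void *host);	/* each hook individually optional */
	int (*setup_clock)(void *host);
};

static int host_probe(void *host, const struct drv_ops *drv_data)
{
	int ret;

	/* NULL table or NULL hook both mean "no SoC-specific step" */
	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret)
			return ret;
	}
	if (drv_data && drv_data->setup_clock)
		return drv_data->setup_clock(host);
	return 0;
}

int main(void)
{
	printf("%d\n", host_probe(NULL, NULL));	/* generic: still fine */
	return 0;
}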
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 565c2e4fac75..6290b7f1ccfe 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1134,4 +1134,4 @@ module_platform_driver(mxcmci_driver);
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
+MODULE_ALIAS("platform:mxc-mmc");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 54bfd0cc106b..fedd258cc4ea 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -178,7 +178,8 @@ struct omap_hsmmc_host {
static int omap_hsmmc_card_detect(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
/* NOTE: assumes card detect signal is active-low */
return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
@@ -186,7 +187,8 @@ static int omap_hsmmc_card_detect(struct device *dev, int slot)
static int omap_hsmmc_get_wp(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
/* NOTE: assumes write protect signal is active-high */
return gpio_get_value_cansleep(mmc->slots[0].gpio_wp);
@@ -194,7 +196,8 @@ static int omap_hsmmc_get_wp(struct device *dev, int slot)
static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
/* NOTE: assumes card detect signal is active-low */
return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
@@ -204,7 +207,8 @@ static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
disable_irq(mmc->slots[0].card_detect_irq);
return 0;
@@ -212,7 +216,8 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
enable_irq(mmc->slots[0].card_detect_irq);
return 0;
@@ -2009,9 +2014,9 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
clk_put(host->dbclk);
}
- mmc_free_host(host->mmc);
+ omap_hsmmc_gpio_free(host->pdata);
iounmap(host->base);
- omap_hsmmc_gpio_free(pdev->dev.platform_data);
+ mmc_free_host(host->mmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 90140eb03e36..8fd50a211037 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -84,30 +85,32 @@ static int __devinit sdhci_dove_probe(struct platform_device *pdev)
struct sdhci_dove_priv *priv;
int ret;
- ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
- if (ret)
- goto sdhci_dove_register_fail;
-
priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv),
GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "unable to allocate private data");
- ret = -ENOMEM;
- goto sdhci_dove_allocate_fail;
+ return -ENOMEM;
}
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+ if (ret)
+ goto sdhci_dove_register_fail;
+
host = platform_get_drvdata(pdev);
pltfm_host = sdhci_priv(host);
pltfm_host->priv = priv;
- priv->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
return 0;
-sdhci_dove_allocate_fail:
- sdhci_pltfm_unregister(pdev);
sdhci_dove_register_fail:
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ }
return ret;
}
@@ -117,14 +120,13 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_dove_priv *priv = pltfm_host->priv;
- if (priv->clk) {
- if (!IS_ERR(priv->clk)) {
- clk_disable_unprepare(priv->clk);
- clk_put(priv->clk);
- }
- devm_kfree(&pdev->dev, priv->clk);
+ sdhci_pltfm_unregister(pdev);
+
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
}
- return sdhci_pltfm_unregister(pdev);
+ return 0;
}
static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = {
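
The sdhci-dove probe is reshuffled so the clock is acquired and running before sdhci_pltfm_register() first touches the controller, and the error path then unwinds only the steps already taken, in reverse order; remove() likewise unregisters before shutting the clock down. The general probe shape, with placeholder steps (the simulated failure just exercises the unwind):

#include <stdio.h>

static int get_and_enable_clock(void) { return 0; }
static void disable_and_put_clock(void) { puts("clock undone"); }
static int register_host(void) { return -1; /* simulate failure */ }

static int probe(void)
{
	int ret;

	ret = get_and_enable_clock();	/* 1: prerequisites first */
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = register_host();		/* 2: first hardware access */
	if (ret)
		goto err_clk;		/* unwind step 1 only */

	return 0;

err_clk:
	disable_and_put_clock();
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}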
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index ae5fcbfa1eef..63d219f57cae 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -169,6 +169,16 @@ static void esdhc_of_resume(struct sdhci_host *host)
}
#endif
+static void esdhc_of_platform_init(struct sdhci_host *host)
+{
+ u32 vvn;
+
+ vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+ vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+ if (vvn == VENDOR_V_22)
+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl,
.read_w = esdhc_readw,
@@ -180,6 +190,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.enable_dma = esdhc_of_enable_dma,
.get_max_clock = esdhc_of_get_max_clock,
.get_min_clock = esdhc_of_get_min_clock,
+ .platform_init = esdhc_of_platform_init,
#ifdef CONFIG_PM
.platform_suspend = esdhc_of_suspend,
.platform_resume = esdhc_of_resume,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 4bb74b042a06..04936f353ced 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1196,7 +1196,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
return ERR_PTR(-ENODEV);
}
- if (pci_resource_len(pdev, bar) != 0x100) {
+ if (pci_resource_len(pdev, bar) < 0x100) {
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
}
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 65551a9709cc..27164457f861 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -150,6 +150,13 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
goto err_remap;
}
+ /*
+ * Some platforms need to probe the controller to be able to
+ * determine which caps should be used.
+ */
+ if (host->ops && host->ops->platform_init)
+ host->ops->platform_init(host);
+
platform_set_drvdata(pdev, host);
return host;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2903949594c6..a54dd5d7a5f9 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -211,8 +211,8 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
if (ourhost->cur_clk != best_src) {
struct clk *clk = ourhost->clk_bus[best_src];
- clk_enable(clk);
- clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_prepare_enable(clk);
+ clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
/* turn clock off to card before changing clock source */
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
@@ -607,7 +607,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
}
/* enable the local io clock and keep it running for the moment. */
- clk_enable(sc->clk_io);
+ clk_prepare_enable(sc->clk_io);
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
struct clk *clk;
@@ -638,7 +638,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
}
#ifndef CONFIG_PM_RUNTIME
- clk_enable(sc->clk_bus[sc->cur_clk]);
+ clk_prepare_enable(sc->clk_bus[sc->cur_clk]);
#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -747,13 +747,14 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
sdhci_s3c_setup_card_detect_gpio(sc);
#ifdef CONFIG_PM_RUNTIME
- clk_disable(sc->clk_io);
+ if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ clk_disable_unprepare(sc->clk_io);
#endif
return 0;
err_req_regs:
#ifndef CONFIG_PM_RUNTIME
- clk_disable(sc->clk_bus[sc->cur_clk]);
+ clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
if (sc->clk_bus[ptr]) {
@@ -762,7 +763,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
}
err_no_busclks:
- clk_disable(sc->clk_io);
+ clk_disable_unprepare(sc->clk_io);
clk_put(sc->clk_io);
err_io_clk:
@@ -794,7 +795,8 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
gpio_free(sc->ext_cd_gpio);
#ifdef CONFIG_PM_RUNTIME
- clk_enable(sc->clk_io);
+ if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ clk_prepare_enable(sc->clk_io);
#endif
sdhci_remove_host(host, 1);
@@ -802,14 +804,14 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
#ifndef CONFIG_PM_RUNTIME
- clk_disable(sc->clk_bus[sc->cur_clk]);
+ clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
if (sc->clk_bus[ptr]) {
clk_put(sc->clk_bus[ptr]);
}
}
- clk_disable(sc->clk_io);
+ clk_disable_unprepare(sc->clk_io);
clk_put(sc->clk_io);
if (pdev->dev.of_node) {
@@ -849,8 +851,8 @@ static int sdhci_s3c_runtime_suspend(struct device *dev)
ret = sdhci_runtime_suspend_host(host);
- clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
- clk_disable(busclk);
+ clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_disable_unprepare(busclk);
return ret;
}
@@ -861,8 +863,8 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
struct clk *busclk = ourhost->clk_io;
int ret;
- clk_enable(busclk);
- clk_enable(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_prepare_enable(busclk);
+ clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
ret = sdhci_runtime_resume_host(host);
return ret;
}
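
Every clk_enable()/clk_disable() pair in sdhci-s3c becomes clk_prepare_enable()/clk_disable_unprepare(): under the common clock framework a clock must be prepared (a step that may sleep) before the atomic enable, and these helpers bundle the pair for callers already in sleepable context. Userspace stand-ins mirroring that pairing (semantics only, not the kernel API):

#include <stdio.h>

struct clk { int prepared, enabled; };

static int clk_prepare_enable(struct clk *c)
{
	c->prepared = 1;	/* may sleep in the real API */
	c->enabled = 1;		/* atomic in the real API */
	return 0;
}

static void clk_disable_unprepare(struct clk *c)
{
	c->enabled = 0;
	c->prepared = 0;
}

int main(void)
{
	struct clk io = { 0, 0 };

	if (clk_prepare_enable(&io))
		return 1;
	/* ... device in use ... */
	clk_disable_unprepare(&io);
	printf("%d %d\n", io.prepared, io.enabled);	/* 0 0 */
	return 0;
}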
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7922adb42386..c7851c0aabce 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1315,16 +1315,19 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
*/
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
- /* eMMC uses cmd21 while sd and sdio use cmd19 */
- tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
- MMC_SEND_TUNING_BLOCK_HS200 :
- MMC_SEND_TUNING_BLOCK;
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc, tuning_opcode);
- spin_lock_irqsave(&host->lock, flags);
-
- /* Restore original mmc_request structure */
- host->mrq = mrq;
+ if (mmc->card) {
+ /* eMMC uses cmd21 but sd and sdio use cmd19 */
+ tuning_opcode =
+ mmc->card->type == MMC_TYPE_MMC ?
+ MMC_SEND_TUNING_BLOCK_HS200 :
+ MMC_SEND_TUNING_BLOCK;
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_execute_tuning(mmc, tuning_opcode);
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore original mmc_request structure */
+ host->mrq = mrq;
+ }
}
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
@@ -2837,6 +2840,9 @@ int sdhci_add_host(struct sdhci_host *host)
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
+ mmc->caps &= ~MMC_CAP_CMD23;
+
if (caps[0] & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
@@ -2846,9 +2852,12 @@ int sdhci_add_host(struct sdhci_host *host)
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
- if (IS_ERR(host->vqmmc)) {
- pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc));
- host->vqmmc = NULL;
+ if (IS_ERR_OR_NULL(host->vqmmc)) {
+ if (PTR_ERR(host->vqmmc) < 0) {
+ pr_info("%s: no vqmmc regulator found\n",
+ mmc_hostname(mmc));
+ host->vqmmc = NULL;
+ }
}
else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000))
regulator_enable(host->vqmmc);
@@ -2904,9 +2913,12 @@ int sdhci_add_host(struct sdhci_host *host)
ocr_avail = 0;
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
- if (IS_ERR(host->vmmc)) {
- pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
- host->vmmc = NULL;
+ if (IS_ERR_OR_NULL(host->vmmc)) {
+ if (PTR_ERR(host->vmmc) < 0) {
+ pr_info("%s: no vmmc regulator found\n",
+ mmc_hostname(mmc));
+ host->vmmc = NULL;
+ }
} else
regulator_enable(host->vmmc);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 97653ea8942b..71a4a7ed46c5 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -278,6 +278,7 @@ struct sdhci_ops {
void (*hw_reset)(struct sdhci_host *host);
void (*platform_suspend)(struct sdhci_host *host);
void (*platform_resume)(struct sdhci_host *host);
+ void (*platform_init)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 11d2bc3b51d5..d25bc97dc5c6 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1466,9 +1466,9 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
+ clk_disable(host->hclk);
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
- clk_disable(host->hclk);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 8f52fc858e48..5a5cd2ace4a6 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
if (*(szlength) != '+') {
devlength = simple_strtoul(szlength, &buffer, 0);
- devlength = handle_unit(devlength, buffer) - devstart;
+ devlength = handle_unit(devlength, buffer);
if (devlength < devstart)
goto err_out;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ec6841d8e956..1a03b7f673ce 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
/*
* Field definitions are in the following datasheets:
* Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
- * New style (6 byte ID): Samsung K9GAG08U0F (p.44)
+ * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
* Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
*
- * Check for ID length, cell type, and Hynix/Samsung ID to decide what
- * to do.
+ * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+ * ID to decide what to do.
*/
- if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) {
+ if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
extid >>= 2;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 64be8f0848b0..d9127e2ed808 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
nr_parts = plen / sizeof(part[0]);
*pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
- if (!pparts)
+ if (!*pparts)
return -ENOMEM;
names = of_get_property(dp, "partition-names", &plen);
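
The one-character ofpart fix is a wrong-pointer check: pparts is the caller's pointer-to-pointer and is never NULL here; what kzalloc() can fail to produce is *pparts. Sketch of the pattern (names illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct part { unsigned long offset, size; };

static int alloc_parts(struct part **pparts, size_t n)
{
	*pparts = calloc(n, sizeof(**pparts));
	if (!*pparts)		/* test the allocation, not the out-param */
		return -ENOMEM;	/* pparts itself is never NULL here */
	return 0;
}

int main(void)
{
	struct part *parts;

	printf("%d\n", alloc_parts(&parts, 4));
	free(parts);
	return 0;
}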
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 7153e0d27101..b3f41f200622 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
* flexonenand_set_boundary - Writes the SLC boundary
* @param mtd - mtd info structure
*/
-int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
int boundary, int lock)
{
struct onenand_chip *this = mtd->priv;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b2530b002125..5f5b69f37d2e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond)
struct net_device *bond_dev = bond->dev;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
+ unsigned int gso_max_size = GSO_MAX_SIZE;
+ u16 gso_max_segs = GSO_MAX_SEGS;
int i;
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
@@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond)
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
+
+ gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
+ gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
}
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
+ bond_dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_size(bond_dev, gso_max_size);
flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
bond_dev->priv_flags = flags | dst_release_flag;
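
The bonding hunk computes the bond's GSO limits as the minimum across all slaves, seeding the fold with the largest legal value so a slave-less bond keeps full capability and any constrained slave pulls the aggregate down. Standalone sketch:

#include <stdio.h>

#define GSO_MAX_SIZE 65536u

static unsigned int bond_gso_max(const unsigned int *slave, int n)
{
	unsigned int m = GSO_MAX_SIZE;	/* identity element for min() */
	int i;

	for (i = 0; i < n; i++)
		if (slave[i] < m)
			m = slave[i];
	return m;
}

int main(void)
{
	unsigned int slaves[] = { 65536, 16384, 32768 };

	printf("%u\n", bond_gso_max(slaves, 3));	/* 16384 */
	return 0;
}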
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index d04911d33b64..47618e505355 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
dev->irq = irq[this_dev];
dev->mem_end = bad[this_dev];
}
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = do_ne_probe(dev);
if (err) {
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c65295dded39..6e5bdd1a31d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
- "Direct manipulation of EEE advertisment is not supported\n");
+ "Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 6dd0dd076cc5..f6cfdc6cf20f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -9941,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
else
rc = bnx2x_8483x_disable_eee(phy, params, vars);
if (rc) {
- DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
return rc;
}
} else {
@@ -12987,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
break;
default:
- DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
}
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bd1fd3d87c24..01611b33a93d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
*/
static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
- if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+ if (!CHIP_IS_E1x(bp)) {
+ u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+ BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_FUNC(bp));
+ }
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32eec15fe4c2..730ae2cfa49e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
struct fw_bye_cmd c;
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, BYE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox)
{
struct fw_initialize_cmd c;
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, INITIALIZE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
struct fw_reset_cmd c;
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
c.val = htonl(reset);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
HOSTPAGESIZEPF7(sge_hps));
t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+ INGPADBOUNDARY_MASK |
EGRSTATUSPAGESIZE_MASK,
INGPADBOUNDARY(fl_align_log - 5) |
EGRSTATUSPAGESIZE(stat_len != 64));
@@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
{
struct fw_vi_enable_cmd c;
+ memset(&c, 0, sizeof(c));
c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
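
The t4_hw.c hunks all fix the same defect: INIT_CMD() only fills the opcode words of an on-stack firmware command, so any field it does not touch carries stack garbage into the mailbox. Zeroing the struct first makes every reserved byte deterministic. A sketch of the pattern, assuming a hypothetical command layout and mailbox call (struct fw_cmd and fw_send are illustrative):

    #include <linux/types.h>
    #include <linux/string.h>

    struct fw_cmd {
            u32 op;
            u32 flags;
            u32 reserved[6];        /* must reach the device as zeros */
    };

    int fw_send(void *hw, const void *cmd, size_t len);  /* hypothetical */

    static int send_reset(void *hw)
    {
            struct fw_cmd c;

            memset(&c, 0, sizeof(c));       /* no stack garbage on the wire */
            c.op = 0x3;                     /* hypothetical RESET opcode */
            return fw_send(hw, &c, sizeof(c));
    }
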
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1d03dcdd5e56..19ac096cb07b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
- if (!netif_running(ndev))
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
+
return 0;
+ }
gfar_init_bds(ndev);
init_registers(ndev);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f8064df10cc4..60ac46f4ac08 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev)
jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
- tasklet_enable(&jme->linkch_task);
- tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
+ (unsigned long) jme);
rc = jme_request_irq(jme);
if (rc)
@@ -1948,10 +1952,10 @@ jme_close(struct net_device *netdev)
JME_NAPI_DISABLE(jme);
- tasklet_disable(&jme->linkch_task);
- tasklet_disable(&jme->txclean_task);
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
+ tasklet_kill(&jme->linkch_task);
+ tasklet_kill(&jme->txclean_task);
+ tasklet_kill(&jme->rxclean_task);
+ tasklet_kill(&jme->rxempty_task);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
@@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev,
tasklet_init(&jme->pcc_task,
jme_pcc_tasklet,
(unsigned long) jme);
- tasklet_init(&jme->linkch_task,
- jme_link_change_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->txclean_task,
- jme_tx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxclean_task,
- jme_rx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxempty_task,
- jme_rx_empty_tasklet,
- (unsigned long) jme);
- tasklet_disable_nosync(&jme->linkch_task);
- tasklet_disable_nosync(&jme->txclean_task);
- tasklet_disable_nosync(&jme->rxclean_task);
- tasklet_disable_nosync(&jme->rxempty_task);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
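
The jme.c change (like the ksz884x and xilinx_axienet changes below) converts tasklets from the init-at-probe plus enable/disable scheme to init in ndo_open and tasklet_kill() in ndo_stop: tasklet_kill() waits for a pending or running tasklet to complete, while tasklet_disable() only defers execution and can leave a scheduled tasklet behind. A sketch of that lifecycle, with hypothetical names (my_priv, my_rx_tasklet):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct my_priv {
            struct tasklet_struct rx_task;
    };

    static void my_rx_tasklet(unsigned long data)
    {
            /* struct my_priv *p = (struct my_priv *)data; process RX here */
    }

    static int my_open(struct net_device *dev)
    {
            struct my_priv *p = netdev_priv(dev);

            /* (re)arm the tasklet each time the interface comes up */
            tasklet_init(&p->rx_task, my_rx_tasklet, (unsigned long)p);
            return 0;
    }

    static int my_stop(struct net_device *dev)
    {
            struct my_priv *p = netdev_priv(dev);

            /* wait for a running instance, then remove it for good */
            tasklet_kill(&p->rx_task);
            return 0;
    }
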
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9b9c2ac5c4c2..d19a143aa5a8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
dev0 = hw->dev[0];
unregister_netdev(dev0);
- tasklet_disable(&hw->phy_task);
+ tasklet_kill(&hw->phy_task);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 318fee91c79d..69e01977a1dd 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev)
/* Delay for receive task to stop scheduling itself. */
msleep(2000 / HZ);
- tasklet_disable(&hw_priv->rx_tasklet);
- tasklet_disable(&hw_priv->tx_tasklet);
+ tasklet_kill(&hw_priv->rx_tasklet);
+ tasklet_kill(&hw_priv->tx_tasklet);
free_irq(dev->irq, hw_priv->dev);
transmit_cleanup(hw_priv, 0);
@@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev)
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- tasklet_enable(&hw_priv->rx_tasklet);
- tasklet_enable(&hw_priv->tx_tasklet);
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
hw->promiscuous = 0;
hw->all_multi = 0;
@@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
spin_lock_init(&hw_priv->hwlock);
mutex_init(&hw_priv->lock);
- /* tasklet is enabled. */
- tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
- (unsigned long) hw_priv);
- tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
- (unsigned long) hw_priv);
-
- /* tasklet_enable will decrement the atomic counter. */
- tasklet_disable(&hw_priv->rx_tasklet);
- tasklet_disable(&hw_priv->tx_tasklet);
-
for (i = 0; i < TOTAL_PORT_NUM; i++)
init_waitqueue_head(&hw_priv->counter[i].counter);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 1c818254b7be..b01f83a044c4 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
@@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp)
cpw8(Config5, cpr8(Config5) & PMEStatus);
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e7ff886e8047..927aa33d4349 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29:
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32:
@@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = swab32(data);
}
+ if (tp->mac_version == RTL_GIGA_MAC_VER_35)
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+
RTL_W32(MAR0 + 4, mc_filter[1]);
RTL_W32(MAR0 + 0, mc_filter[0]);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fb9f6b38511f..edf5edb13140 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
netif_start_queue(net_dev);
/* Workaround for EDB */
- sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+ sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 62d1baf111ea..c53c0f4e2ce3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
static int __devinit smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- unsigned int byte_test;
+ unsigned int byte_test, mask;
unsigned int to = 100;
SMSC_TRACE(pdata, probe, "Driver Parameters:");
@@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev)
/*
* poll the READY bit in PMT_CTRL. Any other access to the device is
* forbidden while this bit isn't set. Try for 100ms
+ *
+ * Note that this test is done before the WORD_SWAP register is
+ * programmed. So in some configurations the READY bit is at 16 before
+ * WORD_SWAP is written to. This issue is worked around by waiting
+ * until either bit 0 or bit 16 gets set in PMT_CTRL.
+ *
+ * SMSC has confirmed that checking bit 16 (marked as reserved in
+ * the datasheet) is fine since these bits "will either never be set
+ * or can only go high after READY does (so also indicate the device
+ * is ready)".
*/
- while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+
+ mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
udelay(1000);
+
if (to == 0) {
pr_err("Device not READY in 100ms aborting\n");
return -ENODEV;
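
The smsc911x workaround polls for READY at either bit 0 or bit 16, because a read issued before the WORD_SWAP register is programmed can come back with its 16-bit halves exchanged; swahw32() builds the halfword-swapped twin of the mask. A compact sketch under the same assumption (MY_READY and the bare status register are illustrative):

    #include <linux/io.h>
    #include <linux/swab.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define MY_READY 0x00000001

    /* Wait up to 100 ms for READY, tolerating halfword-swapped reads. */
    static int wait_ready(void __iomem *status_reg)
    {
            u32 mask = MY_READY | swahw32(MY_READY); /* bit 0 or bit 16 */
            int to = 100;

            while (!(readl(status_reg) & mask) && --to)
                    udelay(1000);

            return to ? 0 : -ENODEV;
    }
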
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 4e9810013850..66e025ad5df1 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
ingress_irq = rc;
tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
- 0, NULL, NULL);
+ 0, "tile_net", NULL);
if (rc != 0) {
netdev_err(dev, "request_irq failed: %d\n", rc);
destroy_irq(ingress_irq);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0793299bd39e..a788501e978e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -894,6 +894,8 @@ out:
return IRQ_HANDLED;
}
+static void axienet_dma_err_handler(unsigned long data);
+
/**
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
@@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev)
phy_start(lp->phy_dev);
}
+ /* Enable tasklets for Axi DMA error handling */
+ tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
+ (unsigned long) lp);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
if (ret)
@@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev)
ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
if (ret)
goto err_rx_irq;
- /* Enable tasklets for Axi DMA error handling */
- tasklet_enable(&lp->dma_err_tasklet);
+
return 0;
err_rx_irq:
@@ -960,6 +965,7 @@ err_tx_irq:
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
+ tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -990,7 +996,7 @@ static int axienet_stop(struct net_device *ndev)
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- tasklet_disable(&lp->dma_err_tasklet);
+ tasklet_kill(&lp->dma_err_tasklet);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
@@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op)
goto err_iounmap_2;
}
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
- tasklet_disable(&lp->dma_err_tasklet);
-
return 0;
err_iounmap_2:
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 98934bdf6acf..477d6729b17f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1102,10 +1102,12 @@ static int init_queues(struct port *port)
{
int i;
- if (!ports_open)
- if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
- POOL_ALLOC_SIZE, 32, 0)))
+ if (!ports_open) {
+ dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ POOL_ALLOC_SIZE, 32, 0);
+ if (!dma_pool)
return -ENOMEM;
+ }
if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys)))
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 5039f08f5a5b..43e9ab4f4d7e 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
break;
case SIRDEV_STATE_DONGLE_SPEED:
- if (dev->dongle_drv->reset) {
+ if (dev->dongle_drv->set_speed) {
ret = dev->dongle_drv->set_speed(dev, fsm->param);
if (ret < 0) {
fsm->result = ret;
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 6428fcbbdd4b..daec9b05d168 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
struct mdiobb_ctrl *ctrl = bus->priv;
module_put(ctrl->ops->owner);
- mdiobus_unregister(bus);
mdiobus_free(bus);
}
EXPORT_SYMBOL(free_mdio_bitbang);
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 899274f2f9b1..2ed1140df3e9 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
{
struct mdio_gpio_platform_data *pdata;
struct mii_bus *new_bus;
- int ret;
+ int ret, bus_id;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
pdata = mdio_gpio_of_get_data(pdev);
- else
+ bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+ } else {
pdata = pdev->dev.platform_data;
+ bus_id = pdev->id;
+ }
if (!pdata)
return -ENODEV;
- new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id);
+ new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id);
if (!new_bus)
return -ENODEV;
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index 9db0171e9366..c5db428e73fa 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
if (last) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
- ret = team_dev_queue_xmit(team, last,
- skb2);
+ ret = !team_dev_queue_xmit(team, last,
+ skb2);
if (!sum_ret)
sum_ret = ret;
}
@@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
}
}
if (last) {
- ret = team_dev_queue_xmit(team, last, skb);
+ ret = !team_dev_queue_xmit(team, last, skb);
if (!sum_ret)
sum_ret = ret;
}
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index c81e278629ff..08d55b6bf272 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -31,6 +31,7 @@
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
/*
@@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
/* no jumbogram (16K) support for now */
- dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+ dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
return 0;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4cd582a4f625..74fab1a40156 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -540,10 +540,12 @@ advance:
(ctx->ether_desc == NULL) || (ctx->control != intf))
goto error;
- /* claim interfaces, if any */
- temp = usb_driver_claim_interface(driver, ctx->data, dev);
- if (temp)
- goto error;
+ /* claim data interface, if different from control */
+ if (ctx->data != ctx->control) {
+ temp = usb_driver_claim_interface(driver, ctx->data, dev);
+ if (temp)
+ goto error;
+ }
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
@@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
tasklet_kill(&ctx->bh);
+ /* handle devices with combined control and data interface */
+ if (ctx->control == ctx->data)
+ ctx->data = NULL;
+
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
@@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
+ /* Huawei NCM devices disguised as vendor specific */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
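
The cdc_ncm hunks handle the Huawei devices added just above, which expose control and data on one combined USB interface: claiming the same interface twice fails, and releasing it twice on unbind is worse, so bind claims only when the two differ and unbind NULLs ctx->data for the combined case before the disconnect logic runs. A sketch of the claim side, with generic names:

    #include <linux/usb.h>

    /* Claim the data interface only when it differs from control. */
    static int claim_data_iface(struct usb_driver *driver,
                                struct usb_interface *control,
                                struct usb_interface *data, void *priv)
    {
            if (data == control)
                    return 0;       /* combined interface: already claimed */
            return usb_driver_claim_interface(driver, data, priv);
    }
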
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 7479a5761d0d..362cb8cfeb92 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_READ_;
+ addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
@@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
+ addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
@@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
} else {
u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
skb_push(skb, 4);
+ cpu_to_le32s(&csum_preamble);
memcpy(skb->data, &csum_preamble, 4);
}
}
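
The smsc95xx checksum-preamble fix is a byte-order bug: the u32 is computed in CPU order but copied byte-for-byte into the USB frame, which reverses it on big-endian hosts. cpu_to_le32s() swaps in place before the copy, so the wire layout is host-independent. In isolation, with an illustrative buffer:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    /* Place a CPU-order u32 into a little-endian wire buffer. */
    static void put_le32(u8 *wire, u32 value)
    {
            cpu_to_le32s(&value);    /* in-place swap on big-endian hosts */
            memcpy(wire, &value, 4); /* bytes are now LE either way */
    }
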
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index cb04f900cc46..edb81ed06950 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_err(dev->net, "kevent %d may have been dropped\n", work);
- else
+ if (!schedule_work (&dev->kevent)) {
+ if (net_ratelimit())
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+ } else {
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
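
Once a work item is queued, every further schedule_work() returns false, so a stalled usbnet device could previously emit one error line per packet; gating the message with net_ratelimit() keeps the diagnostic without flooding the log. The same guard reduced to a helper:

    #include <linux/net.h>
    #include <linux/netdevice.h>

    static void report_dropped_kevent(struct net_device *dev, int work)
    {
            if (net_ratelimit())    /* suppress bursts of identical errors */
                    netdev_err(dev, "kevent %d may have been dropped\n", work);
    }
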
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7b4adde93c01..8b5c61917076 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1,5 +1,5 @@
/*
- * VXLAN: Virtual eXtensiable Local Area Network
+ * VXLAN: Virtual eXtensible Local Area Network
*
* Copyright (c) 2012 Vyatta Inc.
*
@@ -50,8 +50,8 @@
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* VLAN + IP header + UDP + VXLAN */
-#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+ /* update header length based on lower device */
+ dev->hard_header_len = lowerdev->hard_header_len +
+ VXLAN_HEADROOM;
}
if (data[IFLA_VXLAN_TOS])
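
The vxlan correction re-budgets headroom as outer IPv4 (20) + UDP (8) + VXLAN (8) + inner Ethernet (14) = 50 bytes; the old constant counted a VLAN tag instead of the inner Ethernet header. Deriving both mtu and hard_header_len from the lower device keeps them consistent, roughly as follows (names here are illustrative):

    #include <linux/netdevice.h>

    /* 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet) */
    #define MY_VXLAN_HEADROOM (20 + 8 + 8 + 14)

    static void tunnel_inherit_limits(struct net_device *tun,
                                      const struct net_device *lower)
    {
            tun->mtu = lower->mtu - MY_VXLAN_HEADROOM;
            tun->hard_header_len = lower->hard_header_len + MY_VXLAN_HEADROOM;
    }
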
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 3f575afd8cfc..e9a3da588e95 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port)
{
int i;
- if (!ports_open)
- if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
- POOL_ALLOC_SIZE, 32, 0)))
+ if (!ports_open) {
+ dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ POOL_ALLOC_SIZE, 32, 0);
+ if (!dma_pool)
return -ENOMEM;
+ }
if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys)))
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8e1559aba495..1829b445d0b0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
ret = ath9k_hw_set_reset_power_on(ah);
- if (!ret)
+ if (ret)
ah->reset_power_on = true;
break;
case ATH9K_RESET_WARM:
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 192251adf986..282eedec675e 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue)
{
struct b43legacy_pio_txpacket *packet, *tmp_packet;
- tasklet_disable(&queue->txtask);
+ tasklet_kill(&queue->txtask);
list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
free_txpacket(packet, 0);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index a6f1e8166008..481345c23ded 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
-#ifndef CONFIG_BRCMFISCAN
+#ifndef CONFIG_BRCMISCAN
/* scheduled scan settings */
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index ff8162d4c454..2d9eee93c743 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwlagn_tx_skb(priv, control->sta, skb))
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
vif_priv->ctx = ctx;
ctx->vif = vif;
+ /*
+ * In SNIFFER device type, the firmware reports the FCS to
+ * the host, rather than snipping it off. Unfortunately,
+ * mac80211 doesn't (yet) provide a per-packet flag for
+ * this, so that we have to set the hardware flag based
+ * on the interfaces added. As the monitor interface can
+ * only be present by itself, and will be removed before
+ * other interfaces are added, this is safe.
+ */
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ else
+ priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
err = iwl_setup_interface(priv, ctx);
if (!err || reset)
goto out;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 7ff3f1430678..408132cf83c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(priv->hw, skb);
}
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 17c8e5d82681..bb69f8f90b3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ spin_lock_irqsave(&rxq->lock, flags);
+ list_add(&rxb->list, &rxq->rx_used);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
dma_map_page(trans->dev, rxb->page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ /*
+ * free the page(s) as well to not break
+ * the invariant that the items on the used
+ * list have no page(s)
+ */
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
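
Both rx.c hunks add the check that dma_map_page() actually succeeded: a failed mapping must never reach the hardware or be unmapped later, so the buffer goes back on the rx_used list and the page is freed, preserving the invariant that used-list entries carry no page. The core pattern, reduced to a helper with illustrative names:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Map one RX page; on failure, return it to the allocator. */
    static int map_rx_page(struct device *dev, struct page *page,
                           unsigned int order, dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_page(dev, page, 0,
                                           PAGE_SIZE << order,
                                           DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, addr)) {
                    __free_pages(page, order);      /* don't leak the page */
                    return -ENOMEM;
            }
            *out = addr;
            return 0;
    }
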
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 105e3af3c621..79a4ddc002d3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 rd_ptr, wr_ptr;
- int n_bd = trans_pcie->txq[txq_id].q.n_bd;
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
- wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
-
- WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
- txq_id, rd_ptr, wr_ptr);
-
iwl_txq_set_inactive(trans, txq_id);
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8d465107f52b..ae9010ed58de 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
return;
}
cmd_node = adapter->curr_cmd;
- if (cmd_node->wait_q_enabled)
- adapter->cmd_wait_q.status = -ETIMEDOUT;
-
if (cmd_node) {
adapter->dbg.timeout_cmd_id =
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
@@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
adapter->ps_mode, adapter->ps_state);
+
+ if (cmd_node->wait_q_enabled) {
+ adapter->cmd_wait_q.status = -ETIMEDOUT;
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);
+ mwifiex_cancel_pending_ioctl(adapter);
+ /* reset cmd_sent flag to unblock new commands */
+ adapter->cmd_sent = false;
+ }
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index fc8a9bfa1248..82cf0fa2d9f6 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
- int hs_actived = 0;
int i;
int ret = 0;
@@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
adapter = card->adapter;
/* Enable the Host Sleep */
- hs_actived = mwifiex_enable_hs(adapter);
- if (hs_actived) {
- pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
- ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (!mwifiex_enable_hs(adapter)) {
+ dev_err(adapter->dev, "cmd: failed to suspend\n");
+ return -EFAULT;
}
+ dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
/* Indicate device suspended */
adapter->is_suspended = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 9970c2b1b199..b7e6607e6b6d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
/*=== Customer ID ===*/
/****** 8188CU ********/
{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+ {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index caa011008cd0..fc24eb9b3948 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
/* Grant backend access to each skb fragment page. */
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct page *page = skb_frag_page(frag);
- tx->flags |= XEN_NETTXF_more_data;
+ len = skb_frag_size(frag);
+ offset = frag->page_offset;
- id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
- np->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&np->tx, prod++);
- tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ /* Data must not cross a page boundary. */
+ BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
- mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
- gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
- tx->gref = np->grant_tx_ref[id] = ref;
- tx->offset = frag->page_offset;
- tx->size = skb_frag_size(frag);
- tx->flags = 0;
+ while (len > 0) {
+ unsigned long bytes;
+
+ BUG_ON(offset >= PAGE_SIZE);
+
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+
+ tx->flags |= XEN_NETTXF_more_data;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist,
+ np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = bytes;
+ tx->flags = 0;
+
+ offset += bytes;
+ len -= bytes;
+
+ /* Next frame */
+ if (offset == PAGE_SIZE && len) {
+ BUG_ON(!PageCompound(page));
+ page++;
+ offset = 0;
+ }
+ }
}
np->tx.req_prod_pvt = prod;
}
+/*
+ * Count how many ring slots are required to send the frags of this
+ * skb. Each frag might be a compound page.
+ */
+static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = frag->page_offset;
+
+ /* Skip unused frames from start of page */
+ offset &= ~PAGE_MASK;
+
+ pages += PFN_UP(offset + size);
+ }
+
+ return pages;
+}
+
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
grant_ref_t ref;
unsigned long mfn;
int notify;
- int frags = skb_shinfo(skb)->nr_frags;
+ int slots;
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
unsigned long flags;
- frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
- if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
- printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
- frags);
- dump_stack();
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+ xennet_count_skb_frag_slots(skb);
+ if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+ net_alert_ratelimited(
+ "xennet: skb rides the rocket: %d slots\n", slots);
goto drop;
}
spin_lock_irqsave(&np->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
- (frags > 1 && !xennet_can_sg(dev)) ||
+ (slots > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irqrestore(&np->tx_lock, flags);
goto drop;
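
xennet_count_skb_frag_slots() exists because a frag can live in a compound page and start beyond its first 4 KiB frame, so the ring needs one slot per page frame the byte range [offset, offset + size) touches. Masking off whole leading frames and rounding the tail up does exactly that; for example, offset 6000 and size 9000 on 4 KiB pages give an in-page offset of 1904 and PFN_UP(1904 + 9000) = 3 slots. The computation in isolation:

    #include <linux/mm.h>
    #include <linux/pfn.h>

    /* Ring slots needed by one fragment, possibly in a compound page. */
    static int frag_slots(unsigned long offset, unsigned long size)
    {
            offset &= ~PAGE_MASK;           /* skip whole leading frames */
            return PFN_UP(offset + size);   /* round the tail up */
    }
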
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 97c440a8cd61..30ae18a03a9c 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -698,13 +698,14 @@ static void pn533_wq_cmd(struct work_struct *work)
cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+ list_del(&cmd->queue);
+
mutex_unlock(&dev->cmd_lock);
__pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
cmd->in_frame_len, cmd->cmd_complete,
cmd->arg, cmd->flags);
- list_del(&cmd->queue);
kfree(cmd);
}
@@ -1678,11 +1679,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
u8 *params, int params_len)
{
- struct pn533_cmd_jump_dep *cmd;
struct pn533_cmd_jump_dep_response *resp;
struct nfc_target nfc_target;
u8 target_gt_len;
int rc;
+ struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg;
+ u8 active = cmd->active;
+
+ kfree(arg);
if (params_len == -ENOENT) {
nfc_dev_dbg(&dev->interface->dev, "");
@@ -1704,7 +1708,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
}
resp = (struct pn533_cmd_jump_dep_response *) params;
- cmd = (struct pn533_cmd_jump_dep *) arg;
rc = resp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
nfc_dev_err(&dev->interface->dev,
@@ -1734,7 +1737,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
if (rc == 0)
rc = nfc_dep_link_is_up(dev->nfc_dev,
dev->nfc_dev->targets[0].idx,
- !cmd->active, NFC_RF_INITIATOR);
+ !active, NFC_RF_INITIATOR);
return 0;
}
@@ -1819,12 +1822,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
dev->in_maxlen, pn533_in_dep_link_up_complete,
cmd, GFP_KERNEL);
- if (rc)
- goto out;
-
-
-out:
- kfree(cmd);
+ if (rc < 0)
+ kfree(cmd);
return rc;
}
@@ -2078,8 +2077,12 @@ error:
static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
u8 *params, int params_len)
{
+ struct sk_buff *skb_out = arg;
+
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_kfree_skb(skb_out);
+
if (params_len < 0) {
nfc_dev_err(&dev->interface->dev,
"Error %d when sending data",
@@ -2117,7 +2120,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
dev->in_maxlen, pn533_tm_send_complete,
- NULL, GFP_KERNEL);
+ skb, GFP_KERNEL);
if (rc) {
nfc_dev_err(&dev->interface->dev,
"Error %d when trying to send data", rc);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6241fd05bd41..a543746fb354 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -320,10 +320,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
} else
next = dev->bus_list.next;
- /* Run device routines with the device locked */
- device_lock(&dev->dev);
retval = cb(dev, userdata);
- device_unlock(&dev->dev);
if (retval)
break;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 94c6e2aa03d6..6c94fc9489e7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -398,6 +398,8 @@ static void pci_device_shutdown(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
+ pm_runtime_resume(dev);
+
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
pci_msi_shutdown(pci_dev);
@@ -408,16 +410,6 @@ static void pci_device_shutdown(struct device *dev)
* continue to do DMA
*/
pci_disable_device(pci_dev);
-
- /*
- * Devices may be enabled to wake up by runtime PM, but they need not
- * be supposed to wake up the system from its "power off" state (e.g.
- * ACPI S5). Therefore disable wakeup for all devices that aren't
- * supposed to wake up the system at this point. The state argument
- * will be ignored by pci_enable_wake().
- */
- if (!device_may_wakeup(dev))
- pci_enable_wake(pci_dev, PCI_UNKNOWN, false);
}
#ifdef CONFIG_PM
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 02d107b15281..f39378d9da15 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -458,40 +458,6 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
}
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
-static void
-pci_config_pm_runtime_get(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *parent = dev->parent;
-
- if (parent)
- pm_runtime_get_sync(parent);
- pm_runtime_get_noresume(dev);
- /*
- * pdev->current_state is set to PCI_D3cold during suspending,
- * so wait until suspending completes
- */
- pm_runtime_barrier(dev);
- /*
- * Only need to resume devices in D3cold, because config
- * registers are still accessible for devices suspended but
- * not in D3cold.
- */
- if (pdev->current_state == PCI_D3cold)
- pm_runtime_resume(dev);
-}
-
-static void
-pci_config_pm_runtime_put(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *parent = dev->parent;
-
- pm_runtime_put(dev);
- if (parent)
- pm_runtime_put_sync(parent);
-}
-
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 54858838f098..aabf64798bda 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1858,6 +1858,38 @@ bool pci_dev_run_wake(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+void pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ if (parent)
+ pm_runtime_get_sync(parent);
+ pm_runtime_get_noresume(dev);
+ /*
+ * pdev->current_state is set to PCI_D3cold during suspending,
+ * so wait until suspending completes
+ */
+ pm_runtime_barrier(dev);
+ /*
+ * Only need to resume devices in D3cold, because config
+ * registers are still accessible for devices suspended but
+ * not in D3cold.
+ */
+ if (pdev->current_state == PCI_D3cold)
+ pm_runtime_resume(dev);
+}
+
+void pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ pm_runtime_put(dev);
+ if (parent)
+ pm_runtime_put_sync(parent);
+}
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index bacbcba69cf3..fd92aab9904b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -72,6 +72,8 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
extern int pci_finish_runtime_suspend(struct pci_dev *dev);
extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
extern void pci_wakeup_bus(struct pci_bus *bus);
+extern void pci_config_pm_runtime_get(struct pci_dev *dev);
+extern void pci_config_pm_runtime_put(struct pci_dev *dev);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 06bad96af415..af4e31cd3a3b 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -213,6 +213,7 @@ static int report_error_detected(struct pci_dev *dev, void *data)
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
dev->error_state = result_data->state;
if (!dev->driver ||
@@ -231,12 +232,14 @@ static int report_error_detected(struct pci_dev *dev, void *data)
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return 0;
+ goto out;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
@@ -247,14 +250,17 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
@@ -265,14 +271,17 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
@@ -280,15 +289,18 @@ static int report_resume(struct pci_dev *dev, void *data)
{
const struct pci_error_handlers *err_handler;
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
+out:
+ device_unlock(&dev->dev);
return 0;
}
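
Every report_*() callback in aerdrv_core.c now takes device_lock() for its whole body, pinning the driver binding so dev->driver and its err_handler cannot disappear mid-call; the former early returns become goto out so the single unlock is never skipped. The locking skeleton, with an illustrative callback:

    #include <linux/device.h>
    #include <linux/pci.h>

    static int invoke_err_cb(struct pci_dev *dev,
                             int (*cb)(struct pci_dev *))
    {
            int ret = 0;

            device_lock(&dev->dev);         /* pin the driver binding */
            if (!dev->driver)
                    goto out;               /* unbound: nothing to call */
            ret = cb(dev);
    out:
            device_unlock(&dev->dev);
            return ret;
    }
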
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index d03a7a39b2d8..ed129b414624 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -272,7 +272,8 @@ static int get_port_device_capability(struct pci_dev *dev)
}
/* Hot-Plug Capable */
- if (cap_mask & PCIE_PORT_SERVICE_HP) {
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
+ dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) {
pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index eb907a8faf2a..9b8505ccc56d 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -76,6 +76,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
if (!access_ok(VERIFY_WRITE, buf, cnt))
return -EINVAL;
+ pci_config_pm_runtime_get(dev);
+
if ((pos & 1) && cnt) {
unsigned char val;
pci_user_read_config_byte(dev, pos, &val);
@@ -121,6 +123,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
cnt--;
}
+ pci_config_pm_runtime_put(dev);
+
*ppos = pos;
return nbytes;
}
@@ -146,6 +150,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
if (!access_ok(VERIFY_READ, buf, cnt))
return -EINVAL;
+ pci_config_pm_runtime_get(dev);
+
if ((pos & 1) && cnt) {
unsigned char val;
__get_user(val, buf);
@@ -191,6 +197,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
cnt--;
}
+ pci_config_pm_runtime_put(dev);
+
*ppos = pos;
i_size_write(ino, dp->size);
return nbytes;
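
proc.c now brackets each config-space access with the pci_config_pm_runtime_get()/put() helpers promoted into pci.c above: config registers of a device in D3cold are inaccessible, and the helper resumes the device only in that state, after a pm_runtime_barrier() lets any in-flight suspend settle. A sketch of the bracket around one read (the helpers come from the drivers/pci-internal pci.h, per this series):

    #include <linux/pci.h>
    #include "pci.h"        /* drivers/pci internal: declares the helpers */

    static u8 cfg_read_byte_resumed(struct pci_dev *pdev, int pos)
    {
            u8 val;

            pci_config_pm_runtime_get(pdev);  /* resume if in D3cold */
            pci_read_config_byte(pdev, pos, &val);
            pci_config_pm_runtime_put(pdev);
            return val;
    }
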
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7bf914df6e91..aeecf0f72cad 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -178,12 +178,14 @@ config PINCTRL_COH901
ports of 8 GPIO pins each.
config PINCTRL_SAMSUNG
- bool "Samsung pinctrl driver"
+ bool
+ depends on OF && GPIOLIB
select PINMUX
select PINCONF
config PINCTRL_EXYNOS4
bool "Pinctrl driver data for Exynos4 SoC"
+ depends on OF && GPIOLIB
select PINCTRL_SAMSUNG
config PINCTRL_MVEBU
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5d4f44f462f0..b1fd6ee33c6c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -244,7 +244,7 @@ static int spear_pinctrl_endisable(struct pinctrl_dev *pctldev,
else
temp = ~muxreg->val;
- val |= temp;
+ val |= muxreg->mask & temp;
pmx_writel(pmx, val, muxreg->reg);
}
}
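
The pinctrl-spear.c fix confines the read-modify-write to the field being muxed: temp may be ~muxreg->val, so OR-ing it in unmasked would set every bit outside the field and corrupt neighbouring pad settings that share the register. The masked update as a pure function:

    #include <linux/types.h>

    /* Update only the bits covered by mask; leave the rest untouched. */
    static u32 rmw_field(u32 old, u32 mask, u32 new_bits)
    {
            return (old & ~mask) | (new_bits & mask);
    }
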
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index d6cca8c81b92..0436fc7895d6 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -25,8 +25,8 @@ static const struct pinctrl_pin_desc spear1310_pins[] = {
};
/* registers */
-#define PERIP_CFG 0x32C
- #define MCIF_SEL_SHIFT 3
+#define PERIP_CFG 0x3B0
+ #define MCIF_SEL_SHIFT 5
#define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT)
#define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT)
#define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT)
@@ -164,6 +164,10 @@ static const struct pinctrl_pin_desc spear1310_pins[] = {
#define PMX_SSP0_CS0_MASK (1 << 29)
#define PMX_SSP0_CS1_2_MASK (1 << 30)
+#define PAD_DIRECTION_SEL_0 0x65C
+#define PAD_DIRECTION_SEL_1 0x660
+#define PAD_DIRECTION_SEL_2 0x664
+
/* combined macros */
#define PMX_GMII_MASK (PMX_GMIICLK_MASK | \
PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
@@ -237,6 +241,10 @@ static struct spear_muxreg i2c0_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2C0_MASK,
.val = PMX_I2C0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
},
};
@@ -269,6 +277,10 @@ static struct spear_muxreg ssp0_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_SSP0_MASK,
.val = PMX_SSP0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SSP0_MASK,
+ .val = PMX_SSP0_MASK,
},
};
@@ -294,6 +306,10 @@ static struct spear_muxreg ssp0_cs0_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_SSP0_CS0_MASK,
.val = PMX_SSP0_CS0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_SSP0_CS0_MASK,
+ .val = PMX_SSP0_CS0_MASK,
},
};
@@ -319,6 +335,10 @@ static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_SSP0_CS1_2_MASK,
.val = PMX_SSP0_CS1_2_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_SSP0_CS1_2_MASK,
+ .val = PMX_SSP0_CS1_2_MASK,
},
};
@@ -352,6 +372,10 @@ static struct spear_muxreg i2s0_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2S0_MASK,
.val = PMX_I2S0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
},
};
@@ -384,6 +408,10 @@ static struct spear_muxreg i2s1_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_I2S1_MASK,
.val = PMX_I2S1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_I2S1_MASK,
+ .val = PMX_I2S1_MASK,
},
};
@@ -418,6 +446,10 @@ static struct spear_muxreg clcd_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = PMX_CLCD1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
@@ -443,6 +475,10 @@ static struct spear_muxreg clcd_high_res_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_CLCD2_MASK,
.val = PMX_CLCD2_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_CLCD2_MASK,
+ .val = PMX_CLCD2_MASK,
},
};
@@ -461,7 +497,7 @@ static struct spear_pingroup clcd_high_res_pingroup = {
.nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
};
-static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" };
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
static struct spear_function clcd_function = {
.name = "clcd",
.groups = clcd_grps,
@@ -479,6 +515,14 @@ static struct spear_muxreg arm_gpio_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_EGPIO_1_GRP_MASK,
.val = PMX_EGPIO_1_GRP_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_EGPIO_0_GRP_MASK,
+ .val = PMX_EGPIO_0_GRP_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_EGPIO_1_GRP_MASK,
+ .val = PMX_EGPIO_1_GRP_MASK,
},
};
@@ -511,6 +555,10 @@ static struct spear_muxreg smi_2_chips_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_SMI_MASK,
.val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
},
};
@@ -539,6 +587,14 @@ static struct spear_muxreg smi_4_chips_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
.val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
},
};
@@ -573,6 +629,10 @@ static struct spear_muxreg gmii_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_GMII_MASK,
.val = PMX_GMII_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_GMII_MASK,
+ .val = PMX_GMII_MASK,
},
};
@@ -615,6 +675,18 @@ static struct spear_muxreg rgmii_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_RGMII_REG2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_RGMII_REG0_MASK,
+ .val = PMX_RGMII_REG0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_RGMII_REG1_MASK,
+ .val = PMX_RGMII_REG1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_RGMII_REG2_MASK,
+ .val = PMX_RGMII_REG2_MASK,
},
};
@@ -649,6 +721,10 @@ static struct spear_muxreg smii_0_1_2_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_SMII_0_1_2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_SMII_0_1_2_MASK,
+ .val = PMX_SMII_0_1_2_MASK,
},
};
@@ -681,6 +757,10 @@ static struct spear_muxreg ras_mii_txclk_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NFCE2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NFCE2_MASK,
+ .val = PMX_NFCE2_MASK,
},
};
@@ -721,6 +801,14 @@ static struct spear_muxreg nand_8bit_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NAND8BIT_1_MASK,
.val = PMX_NAND8BIT_1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_NAND8BIT_0_MASK,
+ .val = PMX_NAND8BIT_0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NAND8BIT_1_MASK,
+ .val = PMX_NAND8BIT_1_MASK,
},
};
@@ -747,6 +835,10 @@ static struct spear_muxreg nand_16bit_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NAND16BIT_1_MASK,
.val = PMX_NAND16BIT_1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NAND16BIT_1_MASK,
+ .val = PMX_NAND16BIT_1_MASK,
},
};
@@ -772,6 +864,10 @@ static struct spear_muxreg nand_4_chips_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NAND_4CHIPS_MASK,
.val = PMX_NAND_4CHIPS_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NAND_4CHIPS_MASK,
+ .val = PMX_NAND_4CHIPS_MASK,
},
};
@@ -833,6 +929,10 @@ static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_KBD_ROWCOL68_MASK,
.val = PMX_KBD_ROWCOL68_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL68_MASK,
+ .val = PMX_KBD_ROWCOL68_MASK,
},
};
@@ -866,6 +966,10 @@ static struct spear_muxreg uart0_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_UART0_MASK,
.val = PMX_UART0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_UART0_MASK,
+ .val = PMX_UART0_MASK,
},
};
@@ -891,6 +995,10 @@ static struct spear_muxreg uart0_modem_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_UART0_MODEM_MASK,
.val = PMX_UART0_MODEM_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_UART0_MODEM_MASK,
+ .val = PMX_UART0_MODEM_MASK,
},
};
@@ -923,6 +1031,10 @@ static struct spear_muxreg gpt0_tmr0_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT0_TMR0_MASK,
.val = PMX_GPT0_TMR0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT0_TMR0_MASK,
+ .val = PMX_GPT0_TMR0_MASK,
},
};
@@ -948,6 +1060,10 @@ static struct spear_muxreg gpt0_tmr1_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT0_TMR1_MASK,
.val = PMX_GPT0_TMR1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT0_TMR1_MASK,
+ .val = PMX_GPT0_TMR1_MASK,
},
};
@@ -980,6 +1096,10 @@ static struct spear_muxreg gpt1_tmr0_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT1_TMR0_MASK,
.val = PMX_GPT1_TMR0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT1_TMR0_MASK,
+ .val = PMX_GPT1_TMR0_MASK,
},
};
@@ -1005,6 +1125,10 @@ static struct spear_muxreg gpt1_tmr1_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT1_TMR1_MASK,
.val = PMX_GPT1_TMR1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT1_TMR1_MASK,
+ .val = PMX_GPT1_TMR1_MASK,
},
};
@@ -1049,6 +1173,20 @@ static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
.reg = PAD_FUNCTION_EN_2, \
.mask = PMX_MCIFALL_2_MASK, \
.val = PMX_MCIFALL_2_MASK, \
+ }, { \
+ .reg = PAD_DIRECTION_SEL_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = PMX_MCI_DATA8_15_MASK, \
+ }, { \
+ .reg = PAD_DIRECTION_SEL_1, \
+ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ .val = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ }, { \
+ .reg = PAD_DIRECTION_SEL_2, \
+ .mask = PMX_MCIFALL_2_MASK, \
+ .val = PMX_MCIFALL_2_MASK, \
}
/* sdhci device */
@@ -1154,6 +1292,10 @@ static struct spear_muxreg touch_xy_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_TOUCH_XY_MASK,
.val = PMX_TOUCH_XY_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_TOUCH_XY_MASK,
+ .val = PMX_TOUCH_XY_MASK,
},
};
@@ -1187,6 +1329,10 @@ static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2C0_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
},
};
@@ -1213,6 +1359,12 @@ static struct spear_muxreg uart1_dis_sd_muxreg[] = {
.mask = PMX_MCIDATA1_MASK |
PMX_MCIDATA2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
+ .val = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
},
};
@@ -1246,6 +1398,10 @@ static struct spear_muxreg uart2_3_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2S0_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
},
};
@@ -1278,6 +1434,10 @@ static struct spear_muxreg uart4_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+ .val = PMX_I2S0_MASK | PMX_CLCD1_MASK,
},
};
@@ -1310,6 +1470,10 @@ static struct spear_muxreg uart5_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
@@ -1344,6 +1508,10 @@ static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
@@ -1376,6 +1544,10 @@ static struct spear_muxreg i2c_1_2_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
@@ -1409,6 +1581,10 @@ static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+ .val = PMX_CLCD1_MASK | PMX_SMI_MASK,
},
};
@@ -1435,6 +1611,10 @@ static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+ .val = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
},
};
@@ -1469,6 +1649,10 @@ static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_SMI_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
},
};
@@ -1499,6 +1683,14 @@ static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_MCIDATA5_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_MCIDATA4_MASK,
+ .val = PMX_MCIDATA4_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIDATA5_MASK,
+ .val = PMX_MCIDATA5_MASK,
},
};
@@ -1526,6 +1718,12 @@ static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
.mask = PMX_MCIDATA6_MASK |
PMX_MCIDATA7_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
+ .val = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
},
};
@@ -1560,6 +1758,10 @@ static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_KBD_ROWCOL25_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = PMX_KBD_ROWCOL25_MASK,
},
};
@@ -1587,6 +1789,12 @@ static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
.mask = PMX_MCIIORDRE_MASK |
PMX_MCIIOWRWE_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
+ .val = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
},
};
@@ -1613,6 +1821,12 @@ static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
.mask = PMX_MCIRESETCF_MASK |
PMX_MCICS0CE_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
+ .val = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
},
};
@@ -1651,6 +1865,14 @@ static struct spear_muxreg can0_dis_nor_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NFRSTPWDWN3_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_NFRSTPWDWN2_MASK,
+ .val = PMX_NFRSTPWDWN2_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NFRSTPWDWN3_MASK,
+ .val = PMX_NFRSTPWDWN3_MASK,
},
};
@@ -1677,6 +1899,10 @@ static struct spear_muxreg can0_dis_sd_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+ .val = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
},
};
@@ -1711,6 +1937,10 @@ static struct spear_muxreg can1_dis_sd_muxreg[] = {
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+ .val = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
},
};
@@ -1737,6 +1967,10 @@ static struct spear_muxreg can1_dis_kbd_muxreg[] = {
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_KBD_ROWCOL25_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = PMX_KBD_ROWCOL25_MASK,
},
};
@@ -1763,29 +1997,64 @@ static struct spear_function can1_function = {
.ngroups = ARRAY_SIZE(can1_grps),
};
-/* Pad multiplexing for pci device */
-static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+/* Pad multiplexing for (ras-ip) pci device */
+static const unsigned pci_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
-#define PCI_SATA_MUXREG \
- { \
- .reg = PAD_FUNCTION_EN_0, \
- .mask = PMX_MCI_DATA8_15_MASK, \
- .val = 0, \
- }, { \
- .reg = PAD_FUNCTION_EN_1, \
- .mask = PMX_PCI_REG1_MASK, \
- .val = 0, \
- }, { \
- .reg = PAD_FUNCTION_EN_2, \
- .mask = PMX_PCI_REG2_MASK, \
- .val = 0, \
- }
-/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pci_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_MCI_DATA8_15_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_PCI_REG1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_PCI_REG2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_MCI_DATA8_15_MASK,
+ .val = PMX_MCI_DATA8_15_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_PCI_REG1_MASK,
+ .val = PMX_PCI_REG1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_PCI_REG2_MASK,
+ .val = PMX_PCI_REG2_MASK,
+ },
+};
+
+static struct spear_modemux pci_modemux[] = {
+ {
+ .muxregs = pci_muxreg,
+ .nmuxregs = ARRAY_SIZE(pci_muxreg),
+ },
+};
+
+static struct spear_pingroup pci_pingroup = {
+ .name = "pci_grp",
+ .pins = pci_pins,
+ .npins = ARRAY_SIZE(pci_pins),
+ .modemuxs = pci_modemux,
+ .nmodemuxs = ARRAY_SIZE(pci_modemux),
+};
+
+static const char *const pci_grps[] = { "pci_grp" };
+static struct spear_function pci_function = {
+ .name = "pci",
+ .groups = pci_grps,
+ .ngroups = ARRAY_SIZE(pci_grps),
+};
+
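Each new PAD_DIRECTION_SEL_x entry above mirrors the mask of a paired PAD_FUNCTION_EN_x entry. A minimal sketch of how the pinmux core presumably applies one spear_muxreg (the helper name and pmx_base are assumptions, not from this patch):

	/* hypothetical helper -- illustrates the masked read-modify-write */
	static void pmx_apply(void __iomem *pmx_base,
			      const struct spear_muxreg *m)
	{
		u32 val = readl(pmx_base + m->reg);

		val &= ~m->mask;		/* clear the selected field */
		val |= m->val & m->mask;	/* set the requested bits */
		writel(val, pmx_base + m->reg);
	}

With .val equal to .mask, selecting the group drives the direction bits high for exactly the pads whose function bits the matching entry touches.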
+/* pad multiplexing for (fix-part) pcie0 device */
static struct spear_muxreg pcie0_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = PCIE_CFG_VAL(0),
@@ -1802,15 +2071,12 @@ static struct spear_modemux pcie0_modemux[] = {
static struct spear_pingroup pcie0_pingroup = {
.name = "pcie0_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = pcie0_modemux,
.nmodemuxs = ARRAY_SIZE(pcie0_modemux),
};
-/* pad multiplexing for pcie1 device */
+/* pad multiplexing for (fix-part) pcie1 device */
static struct spear_muxreg pcie1_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = PCIE_CFG_VAL(1),
@@ -1827,15 +2093,12 @@ static struct spear_modemux pcie1_modemux[] = {
static struct spear_pingroup pcie1_pingroup = {
.name = "pcie1_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = pcie1_modemux,
.nmodemuxs = ARRAY_SIZE(pcie1_modemux),
};
-/* pad multiplexing for pcie2 device */
+/* pad multiplexing for (fix-part) pcie2 device */
static struct spear_muxreg pcie2_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = PCIE_CFG_VAL(2),
@@ -1852,22 +2115,20 @@ static struct spear_modemux pcie2_modemux[] = {
static struct spear_pingroup pcie2_pingroup = {
.name = "pcie2_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = pcie2_modemux,
.nmodemuxs = ARRAY_SIZE(pcie2_modemux),
};
-static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
-static struct spear_function pci_function = {
- .name = "pci",
- .groups = pci_grps,
- .ngroups = ARRAY_SIZE(pci_grps),
+static const char *const pcie_grps[] = { "pcie0_grp", "pcie1_grp",
+	"pcie2_grp" };
+static struct spear_function pcie_function = {
+ .name = "pci_express",
+ .groups = pcie_grps,
+ .ngroups = ARRAY_SIZE(pcie_grps),
};
/* pad multiplexing for sata0 device */
static struct spear_muxreg sata0_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = SATA_CFG_VAL(0),
@@ -1884,15 +2145,12 @@ static struct spear_modemux sata0_modemux[] = {
static struct spear_pingroup sata0_pingroup = {
.name = "sata0_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = sata0_modemux,
.nmodemuxs = ARRAY_SIZE(sata0_modemux),
};
/* pad multiplexing for sata1 device */
static struct spear_muxreg sata1_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = SATA_CFG_VAL(1),
@@ -1909,15 +2167,12 @@ static struct spear_modemux sata1_modemux[] = {
static struct spear_pingroup sata1_pingroup = {
.name = "sata1_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = sata1_modemux,
.nmodemuxs = ARRAY_SIZE(sata1_modemux),
};
/* pad multiplexing for sata2 device */
static struct spear_muxreg sata2_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = SATA_CFG_VAL(2),
@@ -1934,8 +2189,6 @@ static struct spear_modemux sata2_modemux[] = {
static struct spear_pingroup sata2_pingroup = {
.name = "sata2_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = sata2_modemux,
.nmodemuxs = ARRAY_SIZE(sata2_modemux),
};
@@ -1957,6 +2210,14 @@ static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
PMX_NFCE2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
+ .val = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
},
};
@@ -1983,6 +2244,12 @@ static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
.mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+ .val = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
},
};
@@ -2017,6 +2284,12 @@ static struct spear_muxreg gpt64_muxreg[] = {
.mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
| PMX_MCILEDS_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
+ .val = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
},
};
@@ -2093,6 +2366,7 @@ static struct spear_pingroup *spear1310_pingroups[] = {
&can0_dis_sd_pingroup,
&can1_dis_sd_pingroup,
&can1_dis_kbd_pingroup,
+ &pci_pingroup,
&pcie0_pingroup,
&pcie1_pingroup,
&pcie2_pingroup,
@@ -2138,6 +2412,7 @@ static struct spear_function *spear1310_functions[] = {
&can0_function,
&can1_function,
&pci_function,
+ &pcie_function,
&sata_function,
&ssp1_function,
&gpt64_function,
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index a0eb057e55bd..0606b8cf3f2c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -213,7 +213,7 @@ static const struct pinctrl_pin_desc spear1340_pins[] = {
 * Pad multiplexing for configuring all pads as gpios. This is done to override
 * the values passed from the bootloader and start from scratch.
*/
-static const unsigned pads_as_gpio_pins[] = { 251 };
+static const unsigned pads_as_gpio_pins[] = { 12, 88, 89, 251 };
static struct spear_muxreg pads_as_gpio_muxreg[] = {
{
.reg = PAD_FUNCTION_EN_1,
@@ -1692,7 +1692,43 @@ static struct spear_pingroup clcd_pingroup = {
.nmodemuxs = ARRAY_SIZE(clcd_modemux),
};
-static const char *const clcd_grps[] = { "clcd_grp" };
+/* Disable clcd at runtime to avoid panel damage */
+static struct spear_muxreg clcd_sleep_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = 0x0,
+ },
+};
+
+static struct spear_modemux clcd_sleep_modemux[] = {
+ {
+ .muxregs = clcd_sleep_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_sleep_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_sleep_pingroup = {
+ .name = "clcd_sleep_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_sleep_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_sleep_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_sleep_grp" };
static struct spear_function clcd_function = {
.name = "clcd",
.groups = clcd_grps,
@@ -1893,6 +1929,7 @@ static struct spear_pingroup *spear1340_pingroups[] = {
&sdhci_pingroup,
&cf_pingroup,
&xd_pingroup,
+ &clcd_sleep_pingroup,
&clcd_pingroup,
&arm_trace_pingroup,
&miphy_dbg_pingroup,
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 020b1e0bdb3e..ca47b0e50780 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2240,6 +2240,10 @@ static struct spear_muxreg pwm2_pin_34_muxreg[] = {
.mask = PMX_SSP_CS_MASK,
.val = 0,
}, {
+ .reg = MODE_CONFIG_REG,
+ .mask = PMX_PWM_MASK,
+ .val = PMX_PWM_MASK,
+ }, {
.reg = IP_SEL_PAD_30_39_REG,
.mask = PMX_PL_34_MASK,
.val = PMX_PWM2_PL_34_VAL,
@@ -2956,9 +2960,9 @@ static struct spear_function mii2_function = {
};
/* Pad multiplexing for cadence mii 1_2 as smii or rmii device */
-static const unsigned smii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
+static const unsigned rmii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27 };
-static const unsigned rmii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 };
+static const unsigned smii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 };
static struct spear_muxreg mii0_1_muxreg[] = {
{
.reg = PMX_CONFIG_REG,
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 31f44347f17c..7860b36053c4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -15,6 +15,7 @@
#include "pinctrl-spear.h"
/* pad mux declarations */
+#define PMX_PWM_MASK (1 << 16)
#define PMX_FIRDA_MASK (1 << 14)
#define PMX_I2C_MASK (1 << 13)
#define PMX_SSP_CS_MASK (1 << 12)
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index c17ae22567e0..0c6fcb461faf 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
/**
* rio_map_inb_region -- Map inbound memory region.
* @mport: Master port.
- * @lstart: physical address of memory region to be mapped
+ * @local: physical address of memory region to be mapped
* @rbase: RIO base address assigned to this window
* @size: Size of the memory region
* @rflags: Flags for mapping.
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 5c4829cba6a6..e872c8be080e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1381,22 +1381,14 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
-/**
- * regulator_put - "free" the regulator source
- * @regulator: regulator source
- *
- * Note: drivers must ensure that all regulator_enable calls made on this
- * regulator source are balanced by regulator_disable calls prior to calling
- * this function.
- */
-void regulator_put(struct regulator *regulator)
+/* Caller must hold regulator_list_mutex; see regulator_put() */
+static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
if (regulator == NULL || IS_ERR(regulator))
return;
- mutex_lock(&regulator_list_mutex);
rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
@@ -1412,6 +1404,20 @@ void regulator_put(struct regulator *regulator)
rdev->exclusive = 0;
module_put(rdev->owner);
+}
+
+/**
+ * regulator_put - "free" the regulator source
+ * @regulator: regulator source
+ *
+ * Note: drivers must ensure that all regulator_enable calls made on this
+ * regulator source are balanced by regulator_disable calls prior to calling
+ * this function.
+ */
+void regulator_put(struct regulator *regulator)
+{
+ mutex_lock(&regulator_list_mutex);
+ _regulator_put(regulator);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_put);
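The refactor above is the usual locked/unlocked split: _regulator_put() assumes the caller already holds regulator_list_mutex, so error paths that run under the lock (such as the scrub: path in regulator_register(), changed below) can release a supply without self-deadlocking. A minimal sketch of the pattern, with hypothetical names:

	static DEFINE_MUTEX(obj_list_mutex);
	struct obj { struct list_head node; };

	/* caller holds obj_list_mutex */
	static void _obj_put(struct obj *o)
	{
		list_del(&o->node);
		kfree(o);
	}

	/* public variant takes the lock itself */
	void obj_put(struct obj *o)
	{
		mutex_lock(&obj_list_mutex);
		_obj_put(o);
		mutex_unlock(&obj_list_mutex);
	}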
@@ -1974,7 +1980,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
- return (min_uV >= ret && ret <= max_uV);
+ return (min_uV <= ret && ret <= max_uV);
else
return ret;
}
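A worked check of the one-character fix: with min_uV = 1200000, max_uV = 3300000 and a fixed output of ret = 1800000, the old `min_uV >= ret` test is false, so an in-range voltage was reported as unsupported; `min_uV <= ret && ret <= max_uV` is the intended closed-interval test.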
@@ -3365,7 +3371,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
config->ena_gpio, ret);
- goto clean;
+ goto wash;
}
rdev->ena_gpio = config->ena_gpio;
@@ -3445,10 +3451,11 @@ unset_supplies:
scrub:
if (rdev->supply)
- regulator_put(rdev->supply);
+ _regulator_put(rdev->supply);
if (rdev->ena_gpio)
gpio_free(rdev->ena_gpio);
kfree(rdev->constraints);
+wash:
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9ffb6d5f17aa..4ed343e4eb41 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -44,7 +44,6 @@
#define RAW3215_NR_CCWS 3
#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
-#define RAW3215_FIXED 1 /* 3215 console device is not be freed */
#define RAW3215_WORKING 4 /* set if a request is being worked on */
#define RAW3215_THROTTLED 8 /* set if reading is disabled */
#define RAW3215_STOPPED 16 /* set if writing is disabled */
@@ -339,8 +338,10 @@ static void raw3215_wakeup(unsigned long data)
struct tty_struct *tty;
tty = tty_port_tty_get(&raw->port);
- tty_wakeup(tty);
- tty_kref_put(tty);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
}
/*
@@ -629,8 +630,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
- if (!(raw->port.flags & ASYNC_INITIALIZED) ||
- (raw->flags & RAW3215_FIXED))
+ if (!(raw->port.flags & ASYNC_INITIALIZED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -926,8 +926,6 @@ static int __init con3215_init(void)
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
- raw->flags |= RAW3215_FIXED;
-
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
raw3215_free_info(raw);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 33bb4d891e16..4af3dfe70ef5 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -112,9 +112,6 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
-#define __MAX_SUBCHANNEL 65535
-#define __MAX_SSID 3
-
struct channel_subsystem {
u8 cssid;
int valid;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fc916f5d7314..fd3143c291c6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1424,7 +1424,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
}
if (device_is_disconnected(cdev))
return IO_SCH_REPROBE;
- if (cdev->online)
+ if (cdev->online && !cdev->private->flags.resuming)
return IO_SCH_VERIFY;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return IO_SCH_UNREG_ATTACH;
@@ -1469,12 +1469,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
rc = 0;
goto out_unlock;
case IO_SCH_VERIFY:
- if (cdev->private->flags.resuming == 1) {
- if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
- ccw_device_set_notoper(cdev);
- break;
- }
- }
/* Trigger path verification. */
io_subchannel_verify(sch);
rc = 0;
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 199bc6791177..65d13e38803f 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -125,8 +125,7 @@ int idset_is_empty(struct idset *set)
void idset_add_set(struct idset *to, struct idset *from)
{
- int len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
- __BITOPS_WORDS(from->num_ssid * from->num_id));
+ int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
}
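The length fix matters because bitmap_or(dst, src1, src2, nbits) takes a count of bits, not of longs: passing the __BITOPS_WORDS() results meant only the first bits/BITS_PER_LONG bits of the union were actually OR'ed.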
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3e25d3150456..4d6ba00d0047 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2942,13 +2942,33 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
QETH_DBF_TEXT(SETUP, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
+
+ switch (cmd->hdr.return_code) {
+ case IPA_RC_NOTSUPP:
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+ card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
+ card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+ return 0;
+ default:
+ if (cmd->hdr.return_code) {
+ QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
+ "rc=%d\n",
+ dev_name(&card->gdev->dev),
+ cmd->hdr.return_code);
+ return 0;
+ }
+ }
+
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- } else {
+ } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- }
+ } else {
+ QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected\n",
+ dev_name(&card->gdev->dev));
+ }
QETH_DBF_TEXT(SETUP, 2, "suppenbl");
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index e67e0258aec5..fddb62654b6a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -626,10 +626,13 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "doL2init");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
- rc = qeth_query_setadapterparms(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
- "device %s: x%x\n", CARD_BUS_ID(card), rc);
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "could not query adapter "
+ "parameters on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ }
}
if (card->info.type == QETH_CARD_TYPE_IQD ||
@@ -676,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc)
+ if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index c1bafc3f3fb1..9594ab62702b 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
frame_index,
(void **)&frame_buffer);
- sci_controller_copy_sata_response(&ireq->stp.req,
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index b191dd549207..71fddbc60f18 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1294,26 +1294,19 @@ static struct scsi_host_template qpti_template = {
static const struct of_device_id qpti_match[];
static int __devinit qpti_sbus_probe(struct platform_device *op)
{
- const struct of_device_id *match;
- struct scsi_host_template *tpnt;
struct device_node *dp = op->dev.of_node;
struct Scsi_Host *host;
struct qlogicpti *qpti;
static int nqptis;
const char *fcode;
- match = of_match_device(qpti_match, &op->dev);
- if (!match)
- return -EINVAL;
- tpnt = match->data;
-
/* Sometimes Antares cards come up not completely
* set up, and we get a report of a zero IRQ.
*/
if (op->archdata.irqs[0] == 0)
return -ENODEV;
- host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
+ host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti));
if (!host)
return -ENOMEM;
@@ -1445,19 +1438,15 @@ static int __devexit qpti_sbus_remove(struct platform_device *op)
static const struct of_device_id qpti_match[] = {
{
.name = "ptisp",
- .data = &qpti_template,
},
{
.name = "PTI,ptisp",
- .data = &qpti_template,
},
{
.name = "QLGC,isp",
- .data = &qpti_template,
},
{
.name = "SUNW,isp",
- .data = &qpti_template,
},
{},
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2936b447cae9..2c0d0ec8150b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,6 +55,7 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -1062,6 +1063,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
/**
+ * scsi_report_opcode - Find out if a given command opcode is supported
+ * @sdev: scsi device to query
+ * @buffer: scratch buffer (must be at least 20 bytes long)
+ * @len: length of buffer
+ * @opcode: opcode for command to look up
+ *
+ * Uses the REPORT SUPPORTED OPERATION CODES command to look up the
+ * given opcode. Returns 0 if RSOC fails or if the opcode is
+ * unsupported. Returns 1 if the device claims to support the command.
+ */
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+ unsigned int len, unsigned char opcode)
+{
+ unsigned char cmd[16];
+ struct scsi_sense_hdr sshdr;
+ int result;
+
+ if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
+ return 0;
+
+ memset(cmd, 0, 16);
+ cmd[0] = MAINTENANCE_IN;
+ cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
+ cmd[2] = 1; /* One command format */
+ cmd[3] = opcode;
+ put_unaligned_be32(len, &cmd[6]);
+ memset(buffer, 0, len);
+
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+ &sshdr, 30 * HZ, 3, NULL);
+
+ if (result && scsi_sense_valid(&sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
+ return 0;
+
+ if ((buffer[1] & 3) == 3) /* Command supported */
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_report_opcode);
+
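Per SPC, the CDB built here is MAINTENANCE IN (0xa3) with service action MI_REPORT_SUPPORTED_OPERATION_CODES (0x0c); cmd[2] = 1 requests the one-command format for the single opcode in cmd[3], with the allocation length big-endian in bytes 6-9. In the response, the low bits of byte 1 form the SUPPORT field, and `(buffer[1] & 3) == 3` accepts 011b, "supported in conformance with the standard".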
+/**
* scsi_device_get - get an additional reference to a scsi_device
* @sdev: device to get a reference to
*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da36a3a81a9e..9032e910bca3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_FAIL;
error = -EILSEQ;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
- } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
- (cmd->cmnd[0] == UNMAP ||
- cmd->cmnd[0] == WRITE_SAME_16 ||
- cmd->cmnd[0] == WRITE_SAME)) {
- description = "Discard failure";
+ } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+ switch (cmd->cmnd[0]) {
+ case UNMAP:
+ description = "Discard failure";
+ break;
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ if (cmd->cmnd[1] & 0x8)
+ description = "Discard failure";
+ else
+ description =
+ "Write same failure";
+ break;
+ default:
+ description = "Invalid command failure";
+ break;
+ }
action = ACTION_FAIL;
error = -EREMOTEIO;
} else
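In this hunk, as in the sd_done() change later in the series, `cmd->cmnd[1] & 0x8` tests the UNMAP bit of a WRITE SAME(10)/(16) CDB: set means the command was issued as a discard ("Discard failure"), clear means it carried real write data ("Write same failure").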
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 12f6fdfc1147..352bc77b7c88 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
#endif
static void sd_config_discard(struct scsi_disk *, unsigned int);
+static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
@@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev,
return err ? err : count;
}
+static ssize_t
+sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+}
+
+static ssize_t
+sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ unsigned long max;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ err = kstrtoul(buf, 10, &max);
+
+ if (err)
+ return err;
+
+ if (max == 0)
+ sdp->no_write_same = 1;
+ else if (max <= SD_MAX_WS16_BLOCKS)
+ sdkp->max_ws_blocks = max;
+
+ sd_config_write_same(sdkp);
+
+ return count;
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
@@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
sd_store_provisioning_mode),
+ __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
+ sd_show_write_same_blocks, sd_store_write_same_blocks),
__ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
sd_show_max_medium_access_timeouts,
sd_store_max_medium_access_timeouts),
@@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
return;
case SD_LBP_UNMAP:
- max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+ max_blocks = min_not_zero(sdkp->max_unmap_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS16:
- max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS10:
- max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
break;
case SD_LBP_ZERO:
- max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
q->limits.discard_zeroes_data = 1;
break;
}
@@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
}
/**
- * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
* @sdp: scsi device to operate on
* @rq: Request to prepare
*
* Will issue either UNMAP or WRITE SAME(16) depending on preference
* indicated by target device.
**/
-static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
{
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct bio *bio = rq->bio;
- sector_t sector = bio->bi_sector;
- unsigned int nr_sectors = bio_sectors(bio);
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
unsigned int len;
int ret;
char *buf;
struct page *page;
- if (sdkp->device->sector_size == 4096) {
- sector >>= 3;
- nr_sectors >>= 3;
- }
-
+ sector >>= ilog2(sdp->sector_size) - 9;
+ nr_sectors >>= ilog2(sdp->sector_size) - 9;
rq->timeout = SD_TIMEOUT;
memset(rq->cmd, 0, rq->cmd_len);
@@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->buffer = page_address(page);
+ rq->__data_len = nr_bytes;
out:
if (ret != BLKPREP_OK) {
@@ -669,6 +713,83 @@ out:
return ret;
}
+static void sd_config_write_same(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+ unsigned int blocks = 0;
+
+ if (sdkp->device->no_write_same) {
+ sdkp->max_ws_blocks = 0;
+ goto out;
+ }
+
+ /* Some devices cannot handle block counts above 0xffff despite
+ * supporting WRITE SAME(16). Consequently we default to 64k
+ * blocks per I/O unless the device explicitly advertises a
+ * bigger limit.
+ */
+ if (sdkp->max_ws_blocks == 0)
+ sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
+
+ if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+ blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
+ else
+ blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
+
+out:
+ blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
+}
+
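blk_queue_max_write_same_sectors() counts 512-byte sectors, hence the `blocks * (logical_block_size >> 9)` conversion: the factor is 1 for 512-byte logical blocks and 8 for 4 KiB ones, so the WRITE SAME(10) default of SD_MAX_WS10_BLOCKS (0xffff) caps a request at 65535 sectors, just under 32 MiB.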
+/**
+ * sd_setup_write_same_cmnd - write the same data to multiple blocks
+ * @sdp: scsi device to operate on
+ * @rq: Request to prepare
+ *
+ * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
+ * preference indicated by target device.
+ **/
+static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
+{
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct bio *bio = rq->bio;
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
+ int ret;
+
+ if (sdkp->device->no_write_same)
+ return BLKPREP_KILL;
+
+ BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+
+ sector >>= ilog2(sdp->sector_size) - 9;
+ nr_sectors >>= ilog2(sdp->sector_size) - 9;
+
+ rq->__data_len = sdp->sector_size;
+ rq->timeout = SD_WRITE_SAME_TIMEOUT;
+ memset(rq->cmd, 0, rq->cmd_len);
+
+ if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+ rq->cmd_len = 16;
+ rq->cmd[0] = WRITE_SAME_16;
+ put_unaligned_be64(sector, &rq->cmd[2]);
+ put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+ } else {
+ rq->cmd_len = 10;
+ rq->cmd[0] = WRITE_SAME;
+ put_unaligned_be32(sector, &rq->cmd[2]);
+ put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+ }
+
+ ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+ rq->__data_len = nr_bytes;
+
+ return ret;
+}
+
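The `ilog2(sdp->sector_size) - 9` shift converts the request's 512-byte sectors to logical blocks (shift 0 for 512-byte devices, 3 for 4 KiB ones). The 16-byte CDB is forced whenever the start LBA needs more than 32 bits or the count more than 16 bits, since WRITE SAME(10) only carries a 4-byte LBA at bytes 2-5 and a 2-byte block count at bytes 7-8.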
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
{
rq->timeout = SD_FLUSH_TIMEOUT;
@@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* block PC requests to make life easier.
*/
if (rq->cmd_flags & REQ_DISCARD) {
- ret = scsi_setup_discard_cmnd(sdp, rq);
+ ret = sd_setup_discard_cmnd(sdp, rq);
+ goto out;
+ } else if (rq->cmd_flags & REQ_WRITE_SAME) {
+ ret = sd_setup_write_same_cmnd(sdp, rq);
goto out;
} else if (rq->cmd_flags & REQ_FLUSH) {
ret = scsi_setup_flush_cmnd(sdp, rq);
@@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
struct scsi_sense_hdr sshdr;
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
+ struct request *req = SCpnt->request;
int sense_valid = 0;
int sense_deferred = 0;
unsigned char op = SCpnt->cmnd[0];
+ unsigned char unmap = SCpnt->cmnd[1] & 8;
- if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
- scsi_set_resid(SCpnt, 0);
+ if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+ if (!result) {
+ good_bytes = blk_rq_bytes(req);
+ scsi_set_resid(SCpnt, 0);
+ } else {
+ good_bytes = 0;
+ scsi_set_resid(SCpnt, blk_rq_bytes(req));
+ }
+ }
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt)
if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
good_bytes = sd_completed_bytes(SCpnt);
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
- if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
- (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
- sd_config_discard(sdkp, SD_LBP_DISABLE);
+ if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+ switch (op) {
+ case UNMAP:
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ break;
+ case WRITE_SAME_16:
+ case WRITE_SAME:
+ if (unmap)
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else {
+ sdkp->device->no_write_same = 1;
+ sd_config_write_same(sdkp);
+
+ good_bytes = 0;
+ req->__data_len = blk_rq_bytes(req);
+ req->cmd_flags |= REQ_QUIET;
+ }
+ }
+ }
break;
default:
break;
@@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
if (buffer[3] == 0x3c) {
unsigned int lba_count, desc_count;
- sdkp->max_ws_blocks =
- (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
- (u64)0xffffffff);
+ sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
if (!sdkp->lbpme)
goto out;
@@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp)
kfree(buffer);
}
+static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
+ WRITE_SAME_16))
+ sdkp->ws16 = 1;
+}
+
static int sd_try_extended_inquiry(struct scsi_device *sdp)
{
/*
@@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
+ sd_read_write_same(sdkp, buffer);
}
sdkp->first_scan = 0;
@@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
blk_queue_flush(sdkp->disk->queue, flush);
set_capacity(disk, sdkp->capacity);
+ sd_config_write_same(sdkp);
kfree(buffer);
out:
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 47c52a6d733c..74a1e4ca5401 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -14,6 +14,7 @@
#define SD_TIMEOUT (30 * HZ)
#define SD_MOD_TIMEOUT (75 * HZ)
#define SD_FLUSH_TIMEOUT (60 * HZ)
+#define SD_WRITE_SAME_TIMEOUT (120 * HZ)
/*
* Number of allowed retries
@@ -39,6 +40,11 @@ enum {
};
enum {
+ SD_MAX_WS10_BLOCKS = 0xffff,
+ SD_MAX_WS16_BLOCKS = 0x7fffff,
+};
+
+enum {
SD_LBP_FULL = 0, /* Full logical block provisioning */
SD_LBP_UNMAP, /* Use UNMAP command */
SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */
@@ -77,6 +83,7 @@ struct scsi_disk {
unsigned lbpws : 1;
unsigned lbpws10 : 1;
unsigned lbpvpd : 1;
+ unsigned ws16 : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index f2ffd963f1c3..d0cafd637199 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -51,12 +51,10 @@ enum android_alarm_return_flags {
#define ANDROID_ALARM_WAIT _IO('a', 1)
#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-#define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size)
-
/* Set alarm */
#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index a5dec1ca1b82..13ee53bd0bf6 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -424,7 +424,6 @@ static void hvc_hangup(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
- int temp_open_count;
if (!hp)
return;
@@ -444,7 +443,6 @@ static void hvc_hangup(struct tty_struct *tty)
return;
}
- temp_open_count = hp->port.count;
hp->port.count = 0;
spin_unlock_irqrestore(&hp->port.lock, flags);
tty_port_tty_set(&hp->port, NULL);
@@ -453,11 +451,6 @@ static void hvc_hangup(struct tty_struct *tty)
if (hp->ops->notifier_hangup)
hp->ops->notifier_hangup(hp, hp->data);
-
- while(temp_open_count) {
- --temp_open_count;
- tty_port_put(&hp->port);
- }
}
/*
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 2bc28a59d385..1ab1d2c66de4 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1239,6 +1239,7 @@ static int __devexit max310x_remove(struct spi_device *spi)
static const struct spi_device_id max310x_id_table[] = {
{ "max3107", MAX310X_TYPE_MAX3107 },
{ "max3108", MAX310X_TYPE_MAX3108 },
+ { }
};
MODULE_DEVICE_TABLE(spi, max310x_id_table);
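The added `{ }` is the required zero-filled sentinel: the SPI core's id matching walks the table until it finds an entry with an empty name, so an unterminated table lets the match loop read past the end of the array. The idiom, with illustrative names:

	static const struct spi_device_id foo_ids[] = {
		{ "foo", 0 },
		{ }	/* sentinel */
	};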
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1e741bca0265..f034716190ff 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2151,8 +2151,15 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum);
irqreturn_t usb_hcd_irq (int irq, void *__hcd)
{
struct usb_hcd *hcd = __hcd;
+ unsigned long flags;
irqreturn_t rc;
+ /* IRQF_DISABLED doesn't work correctly with shared IRQs
+ * when the first handler doesn't use it. So let's just
+ * assume it's never used.
+ */
+ local_irq_save(flags);
+
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
else if (hcd->driver->irq(hcd) == IRQ_NONE)
@@ -2160,6 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
else
rc = IRQ_HANDLED;
+ local_irq_restore(flags);
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_irq);
@@ -2347,6 +2355,14 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd,
int retval;
if (hcd->driver->irq) {
+
+ /* IRQF_DISABLED doesn't work as advertised when used together
+ * with IRQF_SHARED. As usb_hcd_irq() will always disable
+ * interrupts we can remove it here.
+ */
+ if (irqflags & IRQF_SHARED)
+ irqflags &= ~IRQF_DISABLED;
+
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
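Both hunks follow from the quirk spelled out in the comments: on a shared line only the first requester's flags take effect, so IRQF_DISABLED cannot be relied on. usb_hcd_irq() therefore masks local interrupts itself with local_irq_save(), and the flag is stripped whenever IRQF_SHARED is set.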
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index e426ad626d74..4bfa78af379c 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -20,6 +20,7 @@
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
+#include <linux/kconfig.h>
#include <linux/kgdb.h>
#include <linux/kthread.h>
#include <asm/io.h>
@@ -614,12 +615,6 @@ err:
return -ENODEV;
}
-int dbgp_external_startup(struct usb_hcd *hcd)
-{
- return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
-}
-EXPORT_SYMBOL_GPL(dbgp_external_startup);
-
static int ehci_reset_port(int port)
{
u32 portsc;
@@ -979,6 +974,7 @@ struct console early_dbgp_console = {
.index = -1,
};
+#if IS_ENABLED(CONFIG_USB_EHCI_HCD)
int dbgp_reset_prep(struct usb_hcd *hcd)
{
int ret = xen_dbgp_reset_prep(hcd);
@@ -1007,6 +1003,13 @@ int dbgp_reset_prep(struct usb_hcd *hcd)
}
EXPORT_SYMBOL_GPL(dbgp_reset_prep);
+int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
+}
+EXPORT_SYMBOL_GPL(dbgp_external_startup);
+#endif /* USB_EHCI_HCD */
+
#ifdef CONFIG_KGDB
static char kgdbdbgp_buf[DBGP_MAX_PACKET];
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 6458764994ef..4ec3c0d7a18b 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -20,6 +20,7 @@
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include "u_ether.h"
@@ -295,7 +296,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
while (skb2) {
if (status < 0
|| ETH_HLEN > skb2->len
- || skb2->len > ETH_FRAME_LEN) {
+ || skb2->len > VLAN_ETH_FRAME_LEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
DBG(dev, "rx length %d\n", skb2->len);
diff --git a/drivers/usb/host/ehci-ls1x.c b/drivers/usb/host/ehci-ls1x.c
index ca759652626b..aa0f328922df 100644
--- a/drivers/usb/host/ehci-ls1x.c
+++ b/drivers/usb/host/ehci-ls1x.c
@@ -113,7 +113,7 @@ static int ehci_hcd_ls1x_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret)
goto err_put_hcd;
diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c
index 84201cd1a472..41e378f17c66 100644
--- a/drivers/usb/host/ohci-xls.c
+++ b/drivers/usb/host/ohci-xls.c
@@ -56,7 +56,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver,
goto err3;
}
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (retval != 0)
goto err4;
return retval;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d0b87e7b4abf..b6b84dacc791 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -707,11 +707,12 @@ static void rxstate(struct musb *musb, struct musb_request *req)
fifo_count = musb_readw(epio, MUSB_RXCOUNT);
/*
- * use mode 1 only if we expect data of at least ep packet_sz
- * and have not yet received a short packet
+ * Enable Mode 1 on RX transfers only when the short_not_ok flag
+ * is set. Currently the short_not_ok flag is set only by the
+ * file_storage and f_mass_storage drivers.
*/
- if ((request->length - request->actual >= musb_ep->packet_sz) &&
- (fifo_count >= musb_ep->packet_sz))
+
+ if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
use_mode_1 = 1;
else
use_mode_1 = 0;
@@ -727,6 +728,27 @@ static void rxstate(struct musb *musb, struct musb_request *req)
c = musb->dma_controller;
channel = musb_ep->dma;
+ /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion. We only get interrupts from DMA controller.
+ *
+ * We could operate in DMA mode 1 if we knew the size of the transfer
+ * in advance. For mass storage class, request->length = what the host
+ * sends, so that'd work. But for pretty much everything else,
+ * request->length is routinely more than what the host sends. For
+ * most of these gadgets, end of transfer is signified either by a
+ * short packet, or filling the last byte of the buffer. (Sending extra
+ * data in that last packet should trigger an overflow fault.) But in mode 1,
+ * we don't get DMA completion interrupt for short packets.
+ *
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
/* Experimental: Mode1 works with mass storage use cases */
if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index d62a91fedc22..0e62f504410e 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -65,7 +65,7 @@ static int __devinit ux500_probe(struct platform_device *pdev)
struct platform_device *musb;
struct ux500_glue *glue;
struct clk *clk;
-
+ int musbid;
int ret = -ENOMEM;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index d8c8a42bff3e..6223062d5d1b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -58,7 +58,7 @@ config USB_ULPI_VIEWPORT
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
- depends on TWL4030_CORE && REGULATOR_TWL4030
+ depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
select USB_OTG_UTILS
help
Enable this to support the USB OTG transceiver on TWL4030
@@ -68,7 +68,7 @@ config TWL4030_USB
config TWL6030_USB
tristate "TWL6030 USB Transceiver Driver"
- depends on TWL4030_CORE && OMAP_USB2
+ depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
select USB_OTG_UTILS
help
Enable this to support the USB OTG transceiver on TWL6030
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 7179b0c5f814..cff8dd5b462d 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2430,7 +2430,7 @@ static void keyspan_release(struct usb_serial *serial)
static int keyspan_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct keyspan_port_private *s_priv;
+ struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct callbacks *cback;
@@ -2445,7 +2445,6 @@ static int keyspan_port_probe(struct usb_serial_port *port)
if (!p_priv)
return -ENOMEM;
- s_priv = usb_get_serial_data(port->serial);
p_priv->device_details = d_details;
/* Setup values for the various callback routines */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5dee7d61241e..edc64bb6f457 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -158,6 +158,7 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+#define NOVATELWIRELESS_PRODUCT_E362 0x9010
#define NOVATELWIRELESS_PRODUCT_G1 0xA001
#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
@@ -193,6 +194,9 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
#define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
+#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
+#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -283,6 +287,7 @@ static void option_instat_callback(struct urb *urb);
/* ALCATEL PRODUCTS */
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S_X200 0x0000
+#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -706,6 +711,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -728,6 +734,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1157,6 +1165,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 61a73ad1a187..a3e9c095f0d8 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -455,9 +455,6 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
struct usb_serial *serial = port->serial;
struct urb *urb;
- if (endpoint == -1)
- return NULL; /* endpoint not needed */
-
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
if (urb == NULL) {
dev_dbg(&serial->interface->dev,
@@ -489,6 +486,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
init_usb_anchor(&portdata->delayed);
for (i = 0; i < N_IN_URB; i++) {
+ if (!port->bulk_in_size)
+ break;
+
buffer = (u8 *)__get_free_page(GFP_KERNEL);
if (!buffer)
goto bail_out_error;
@@ -502,8 +502,8 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
}
for (i = 0; i < N_OUT_URB; i++) {
- if (port->bulk_out_endpointAddress == -1)
- continue;
+ if (!port->bulk_out_size)
+ break;
buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
if (!buffer)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index a3d54366afcc..92f35abee92d 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev)
/* Some devices don't handle VPD pages correctly */
sdev->skip_vpd_pages = 1;
+ /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */
+ sdev->no_report_opcodes = 1;
+
+ /* Do not attempt to use WRITE SAME */
+ sdev->no_write_same = 1;
+
/* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
* If this device makes that mistake, tell the sd driver. */
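These two quirk flags hook into the sd/scsi changes earlier in this series: scsi_report_opcode() returns 0 immediately when sdev->no_report_opcodes is set, and sd_config_write_same() zeroes max_ws_blocks when sdev->no_write_same is set, so USB mass-storage bridges, which frequently mishandle both commands, are never sent RSOC or WRITE SAME.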
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index d64ac3842884..bee92846cfab 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
struct omap_dss_output *out;
enum omap_dss_output_id id;
- id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+ switch (module) {
+ case 0:
+ id = OMAP_DSS_OUTPUT_DSI1;
+ break;
+ case 1:
+ id = OMAP_DSS_OUTPUT_DSI2;
+ break;
+ default:
+ return NULL;
+ }
out = omap_dss_get_output(id);
- return out->pdev;
+ return out ? out->pdev : NULL;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 2ab1c3e96553..5f6eea801b06 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -697,11 +697,15 @@ static int dss_get_clocks(void)
dss.dss_clk = clk;
- clk = clk_get(NULL, dss.feat->clk_name);
- if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->clk_name);
- r = PTR_ERR(clk);
- goto err;
+ if (dss.feat->clk_name) {
+ clk = clk_get(NULL, dss.feat->clk_name);
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get %s\n", dss.feat->clk_name);
+ r = PTR_ERR(clk);
+ goto err;
+ }
+ } else {
+ clk = NULL;
}
dss.dpll4_m4_ck = clk;
@@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev)
if (cpu_is_omap24xx())
src = &omap24xx_dss_feats;
- else if (cpu_is_omap34xx())
- src = &omap34xx_dss_feats;
else if (cpu_is_omap3630())
src = &omap3630_dss_feats;
+ else if (cpu_is_omap34xx())
+ src = &omap34xx_dss_feats;
else if (cpu_is_omap44xx())
src = &omap44xx_dss_feats;
else if (soc_is_omap54xx())
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index a48a7dd75b33..8c9b8b3b7f77 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s)
{
mutex_lock(&hdmi.lock);
- if (hdmi_runtime_get())
+ if (hdmi_runtime_get()) {
+ mutex_unlock(&hdmi.lock);
return;
+ }
hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 606b89f12351..d630b26a005c 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
- if (!display && !display->output && !display->output->manager) {
+ if (!display || !display->output || !display->output->manager) {
r = -EINVAL;
break;
}
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 1e8659ca27ef..809b0de59c09 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -225,8 +225,10 @@ EXPORT_SYMBOL_GPL(register_virtio_device);
void unregister_virtio_device(struct virtio_device *dev)
{
+ int index = dev->index; /* save for after device release */
+
device_unregister(&dev->dev);
- ida_simple_remove(&virtio_index_ida, dev->index);
+ ida_simple_remove(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 0e8637035457..74354708c6c4 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,6 +2,7 @@ ifneq ($(CONFIG_ARM),y)
obj-y += manage.o balloon.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
+obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o events.o
obj-y += xenbus/
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 912ac81b6dbf..0be4df39e953 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1395,10 +1395,10 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
+ irq_enter();
#ifdef CONFIG_X86
exit_idle();
#endif
- irq_enter();
__xen_evtchn_do_upcall();
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c
new file mode 100644
index 000000000000..0ef7c4d40f86
--- /dev/null
+++ b/drivers/xen/fallback.c
@@ -0,0 +1,80 @@
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <asm/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+int xen_event_channel_op_compat(int cmd, void *arg)
+{
+ struct evtchn_op op;
+ int rc;
+
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+
+ switch (cmd) {
+ case EVTCHNOP_close:
+ case EVTCHNOP_send:
+ case EVTCHNOP_bind_vcpu:
+ case EVTCHNOP_unmask:
+ /* no output */
+ break;
+
+#define COPY_BACK(eop) \
+ case EVTCHNOP_##eop: \
+ memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \
+ break
+
+ COPY_BACK(bind_interdomain);
+ COPY_BACK(bind_virq);
+ COPY_BACK(bind_pirq);
+ COPY_BACK(status);
+ COPY_BACK(alloc_unbound);
+ COPY_BACK(bind_ipi);
+#undef COPY_BACK
+
+ default:
+ WARN_ON(rc != -ENOSYS);
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
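COPY_BACK simply stamps out one switch case per operation that returns data through the argument; COPY_BACK(bind_virq), for instance, expands to:

	case EVTCHNOP_bind_virq:
		memcpy(arg, &op.u.bind_virq, sizeof(op.u.bind_virq));
		break;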
+
+int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+{
+ struct physdev_op op;
+ int rc;
+
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+
+ switch (cmd) {
+ case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
+ case PHYSDEVOP_set_iopl:
+ case PHYSDEVOP_set_iobitmap:
+ case PHYSDEVOP_apic_write:
+ /* no output */
+ break;
+
+#define COPY_BACK(pop, fld) \
+ case PHYSDEVOP_##pop: \
+ memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \
+ break
+
+ COPY_BACK(irq_status_query, irq_status_query);
+ COPY_BACK(apic_read, apic_op);
+ COPY_BACK(ASSIGN_VECTOR, irq_op);
+#undef COPY_BACK
+
+ default:
+ WARN_ON(rc != -ENOSYS);
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8adb9cc267f9..71f5c459b088 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
down_write(&mm->mmap_sem);
vma = find_vma(mm, m.addr);
- ret = -EINVAL;
if (!vma ||
vma->vm_ops != &privcmd_vm_ops ||
(m.addr != vma->vm_start) ||
((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
!privcmd_enforce_singleshot_mapping(vma)) {
up_write(&mm->mmap_sem);
+ ret = -EINVAL;
goto out;
}
@@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
up_write(&mm->mmap_sem);
- if (state.global_error && (version == 1)) {
- /* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
- state.err = err_array;
- ret = traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_return_errors_v1, &state);
+ if (version == 1) {
+ if (state.global_error) {
+ /* Write back errors in second pass. */
+ state.user_mfn = (xen_pfn_t *)m.arr;
+ state.err = err_array;
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_return_errors_v1, &state);
+ } else {
+ ret = 0;
+ }
+
} else if (version == 2) {
ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
if (ret)